Graph convolutions in Keras

How can we implement graph convolutions in Keras?
Ideally in the form of a layer accepting 2 inputs - the set (as a time sequence) of nodes and, with the same time-dimension length, a set of integer indexes (into the time dimension) of each node's neighbours.

If we were able to gather items into the style and shape that Conv layers expect, we could use normal convolutions.
The gather can be done using the following Keras layer, which uses tensorflow's gather_nd.
class GatherFromIndices(Layer):
    """
    To have a graph convolution (over a fixed-degree kernel) from a given sequence of nodes, we need to gather
    the data of each node's neighbours before running a simple Conv1D/Conv2D,
    which then effectively becomes the defined convolution (or even TimeDistributed(Dense()) can be used,
    depending on the data format we want to output).
    This layer should do exactly that.
    Does not support non-integer values; values less than 0 are automatically masked.
    """
    def __init__(self, mask_value=0, include_self=True, flatten_indices_features=False, **kwargs):
        Layer.__init__(self, **kwargs)
        self.mask_value = mask_value
        self.include_self = include_self
        self.flatten_indices_features = flatten_indices_features

    def get_config(self):
        config = {'mask_value': self.mask_value,
                  'include_self': self.include_self,
                  'flatten_indices_features': self.flatten_indices_features,
                  }
        base_config = super(GatherFromIndices, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    # def build(self, input_shape):
    #     self.built = True

    def compute_output_shape(self, input_shape):
        inp_shape, inds_shape = input_shape
        indices = inds_shape[-1]
        if self.include_self:
            indices += 1
        features = inp_shape[-1]
        if self.flatten_indices_features:
            return tuple(list(inds_shape[:-1]) + [indices * features])
        else:
            return tuple(list(inds_shape[:-1]) + [indices, features])

    def call(self, inputs, training=None):
        inp, inds = inputs
        # assumes inputs in the shape of (inp=[..., batches, sequence_len, features],
        # inds=[..., batches, sequence_ind_len, neighbours] ... indexing into inp)
        # for the output we want to get [..., batches, sequence_ind_len, indices, features]
        assert_shapes = tf.Assert(tf.reduce_all(tf.equal(tf.shape(inp)[:-2], tf.shape(inds)[:-2])), [inp])
        assert_positive_ins_shape = tf.Assert(tf.reduce_all(tf.greater(tf.shape(inds), 0)), [inds])
        # the shapes need to be the same (with the exception of the last dimension)
        with tf.control_dependencies([assert_shapes, assert_positive_ins_shape]):
            inp_shape = tf.shape(inp)
            inds_shape = tf.shape(inds)
            features_dim = -1
            # ^^ todo: for future variability of the last dimension, because maybe this can be made to take
            # something other than the last dimension as features.

            inp_p = tf.reshape(inp, [-1, inp_shape[features_dim]])
            ins_p = tf.reshape(inds, [-1, inds_shape[features_dim]])

            # we have lost the batch dimension by reshaping, so we preserve it by adding the batch size to the
            # respective indexes - we do this because we use gather_nd as non-batched (so we do not need to
            # provide batch indices)
            resized_range = tf.range(tf.shape(ins_p)[0])
            different_seqs_ids_float = tf.scalar_mul(1.0 / tf.to_float(inds_shape[-2]), tf.to_float(resized_range))
            different_seqs_ids = tf.to_int32(tf.floor(different_seqs_ids_float))
            different_seqs_ids_packed = tf.scalar_mul(inp_shape[-2], different_seqs_ids)
            thseq = tf.expand_dims(different_seqs_ids_packed, -1)

            # in case there are negative indices, point them all at the masking row
            # appended to the end of inp_p - that way, everything that should be masked
            # will get the masking value as features.
            mask = tf.greater_equal(ins_p, 0)  # extract where the minuses are, because they will all default to the masking value
            # .. before the mod operation, if greater id numbers are provided, to wrap correctly around small sequences
            offset_ins_p = tf.mod(ins_p, inp_shape[-2]) + thseq  # broadcast to ins_p
            minus_1 = tf.scalar_mul(tf.shape(inp_p)[0], tf.ones_like(mask, dtype=tf.int32))
            '''
            On GPU, if we used index = -1 anywhere, it would throw a warning:
            OP_REQUIRES failed at gather_nd_op.cc:50 : Invalid argument:
            flat indices = [-1] does not index into param.
            It is only a warning that there are -1s. We are using that as a feature and know about it.
            '''
            offset_ins_p = tf.where(mask, offset_ins_p, minus_1)
            # it is also possible to do something like tf.multiply(offset_ins_p, mask) + tf.scalar_mul(-1, mask)
            mask_value_last = tf.zeros((inp_shape[-1],))
            if self.mask_value != 0:
                mask_value_last += tf.constant(self.mask_value)  # broadcasting if needed
            inp_p = tf.concat([inp_p, tf.expand_dims(mask_value_last, 0)], axis=0)

            # expand dims so that it slices n times instead of taking one slice of n indices
            neighb_p = tf.gather_nd(inp_p, tf.expand_dims(offset_ins_p, -1))  # [-1, indices, features]

            out_shape = tf.concat([inds_shape, inp_shape[features_dim:]], axis=-1)
            neighb = tf.reshape(neighb_p, out_shape)
            # ^^ [..., batches, sequence_len, indices, features]

            if self.include_self:  # if set, also append the node's own features
                self_originals = tf.expand_dims(inp, axis=features_dim - 1)
                # ^^ [..., batches, sequence_len, 1, features]
                neighb = tf.concat([neighb, self_originals], axis=features_dim - 1)

            if self.flatten_indices_features:
                neighb = tf.reshape(neighb, tf.concat([inds_shape[:-1], [-1]], axis=-1))
            return neighb
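The core trick in call above is flattening the leading dimensions and offsetting each sequence's indices, so that a single non-batched gather suffices. A minimal numpy sketch of just that trick (the shapes and values are illustrative, my own, not taken from the layer):

import numpy as np

inp = np.arange(24, dtype=np.float32).reshape(2, 4, 3)  # [batch=2, seq_len=4, features=3]
inds = np.array([[[0, 1], [1, 2], [2, 3], [3, 0]],
                 [[1, 0], [2, 1], [3, 2], [0, 3]]])     # [batch=2, seq_len=4, neighbours=2]

inp_flat = inp.reshape(-1, inp.shape[-1])               # [batch*seq_len, features]
# offset each batch's indices so they point into the flattened array
offsets = (np.arange(inp.shape[0]) * inp.shape[1])[:, None, None]
neighb = inp_flat[(inds + offsets).reshape(-1)].reshape(inds.shape + (inp.shape[-1],))
print(neighb.shape)  # (2, 4, 2, 3) = [batch, seq_len, neighbours, features]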
With a debuggable interactive test:
def allow_tf_debug(func):
    """
    Decorator for tests that use tensorflow, to make them more breakpoint-friendly, i.e. to be able to call .eval()
    on tensors immediately.
    """
    def interactive_wrapper():
        sess = tf.InteractiveSession()
        ret = func()
        sess.close()
        return ret
    return interactive_wrapper


@allow_tf_debug
def test_gather_from_indices():
    gat = GatherFromIndices(include_self=False, flatten_indices_features=False)
    # tests for include_self=True and flatten_indices_features=True are not included
    seq = [  # batch of sequences
        # sequences of 2d features
        [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8]],
        [[10, 1], [11, 2], [12, 3], [13, 4], [14, 5], [15, 6], [16, 7], [17, 8]]
    ]
    ids = [  # batch of sequences
        # sequences of 3 ids for each item in the sequence
        [[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [5, 5, 5], [6, 6, 6], [7, 7, 7]],
        [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [5, 6, 7], [6, 7, 0], [7, 0, -1]]
        # minus one should mean masking
    ]

    def compute_assert_2ways_gathers(seq, ids):
        seq = np.array(seq, dtype=np.float32)
        ids = np.array(ids, dtype=np.int32)
        # the intended result, computed naively in numpy
        result_np = None
        if len(ids.shape) == 3:  # classical batches
            result_np = np.empty(list(ids.shape) + [seq.shape[-1]])
            for b, seq_in_batch in enumerate(ids):
                for i, sid in enumerate(seq_in_batch):
                    for c, copyid in enumerate(sid):
                        assert ids[b, i, c] == copyid
                        if ids[b, i, c] < 0:
                            result_np[b, i, c, :] = 0
                        else:
                            result_np[b, i, c, :] = seq[b, ids[b, i, c], :]
        elif len(ids.shape) == 4:  # some other batching format...
            result_np = np.empty(list(ids.shape) + [seq.shape[-1]])
            for mb, mseq_in_batch in enumerate(ids):
                for b, seq_in_batch in enumerate(mseq_in_batch):
                    for i, sid in enumerate(seq_in_batch):
                        for c, copyid in enumerate(sid):
                            assert ids[mb, b, i, c] == copyid
                            if ids[mb, b, i, c] < 0:
                                result_np[mb, b, i, c, :] = 0
                            else:
                                result_np[mb, b, i, c, :] = seq[mb, b, ids[mb, b, i, c], :]

        output_shape_kerascomputed = gat.compute_output_shape([seq.shape, ids.shape])
        assert isinstance(output_shape_kerascomputed, tuple)
        assert list(output_shape_kerascomputed) == list(result_np.shape)
        # with tf.get_default_session() as sess:
        sess = tf.get_default_session()
        gat.build(seq.shape)
        result = gat.call([tf.constant(seq), tf.constant(ids)])
        tf_result = sess.run(result)

        assert list(tf_result.shape) == list(output_shape_kerascomputed)
        assert np.all(np.equal(tf_result, result_np))

    compute_assert_2ways_gathers(seq, ids)
    compute_assert_2ways_gathers(seq * 5, ids * 5)
    compute_assert_2ways_gathers([seq] * 3, [ids] * 3)
And a usage example for 5 neighbours per node:
fields_input = Input(shape=(None, 10), name='nodedata')
neighbours_ids_input = Input(shape=(None, 5), name='nodes_neighbours_ids', dtype='int32')
fields_input_with_neighbours = GatherFromIndices(mask_value=0,
                                                 include_self=True, flatten_indices_features=True)\
    ([fields_input, neighbours_ids_input])
fields = Conv1D(128, kernel_size=5, padding='same',
                activation='relu')(fields_input_with_neighbours)  # data_format="channels_last"
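For orientation (my annotation, not part of the original snippet), the shapes flowing through this model, following the layer's compute_output_shape, should be:

# fields_input:                (batch, seq_len, 10)
# neighbours_ids_input:        (batch, seq_len, 5)
# fields_input_with_neighbours (include_self=True, flatten_indices_features=True):
#                              (batch, seq_len, (5 + 1) * 10) = (batch, seq_len, 60)
# fields, after Conv1D(128):   (batch, seq_len, 128)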

Related

How to define variables separately where you need to index over a list of lists?

I'd like to define a binary variable x[i,j,k], where I is an n-element Vector{Vector{Int64}}, K is an n-element Vector, and J is an m-element Vector.
The length of K is always equal to the number of vectors in I. How do I index over each element of the nth vector in I paired with the nth element in K?
For Example:
I = [[2,6,5], [1,2,4,5,9]]
J = [1,2,3]
K = [4,5] # for a better explanation suppose K = [a,b]
How can I have variables indexed by every entry of every vector in I?
What I'd like to have is like this:
# for a better explanation suppose K = [a,b]
# for each vector in I and its associated element in K, have a variable
# for first pair (i.e. I=[2,6,5] ,K = a)
x[2,1,a], x[2,2,a], x[2,3,a], x[6,1,a], .... x[5,3,a] # in other words we cannot have x[2,1,b] or any other combination with `b`
# for second pair ( i.e. I=[1,2,4,5,9] ,K = b)
x[1,1,b], x[1,2,b],..., x[4,1,b], .... x[9,3,b]
My last try was unsuccessful too:
for idx in 1:length(K)
    @variable(model, x[i in I[idx], j in J, k in K] >= 0, Bin)
end
I believe you are looking for a way to flatten I, so this could be:
julia> @variable(model, x[i in unique!(vcat(I...)), j in J, k in K] >= 0, Bin)
3-dimensional DenseAxisArray{VariableRef,3,...} with index sets:
    Dimension 1, [2, 6, 5, 4, 1, 3, 9]
    Dimension 2, [1, 2, 3]
    Dimension 3, [4, 5]
And data, a 7×3×2 Array{VariableRef, 3}:
[:, :, 4] =
 x[2,1,4]  x[2,2,4]  x[2,3,4]
 x[6,1,4]  x[6,2,4]  x[6,3,4]
 x[5,1,4]  x[5,2,4]  x[5,3,4]
 x[4,1,4]  x[4,2,4]  x[4,3,4]
 x[1,1,4]  x[1,2,4]  x[1,3,4]
 x[3,1,4]  x[3,2,4]  x[3,3,4]
 x[9,1,4]  x[9,2,4]  x[9,3,4]

[:, :, 5] =
 x[2,1,5]  x[2,2,5]  x[2,3,5]
 x[6,1,5]  x[6,2,5]  x[6,3,5]
 x[5,1,5]  x[5,2,5]  x[5,3,5]
 x[4,1,5]  x[4,2,5]  x[4,3,5]
 x[1,1,5]  x[1,2,5]  x[1,3,5]
 x[3,1,5]  x[3,2,5]  x[3,3,5]
 x[9,1,5]  x[9,2,5]  x[9,3,5]
Note: since the values of I are repeated, unique! was required.

Dependent Arrays in Constraints JuMP

I want to code this constraint.
d and a in the code below are subsets of the set S, each of size N. For example: (N=5, T=3, S=6), d=[1,2,2,3,1] (the elements of d come from the first three values of S and d has size N) and a=[6,4,5,6,4] (the elements of a come from the last three values of S and a has size N).
In the constraint, s should start with d and end with a.
It should be like s[j=1]=1:6, s[j=2]=2:4, s[j=3]=2:5, s[j=4]=3:6, s[j=5]=1:4.
I do not know how to deal with this set that depends on the other sets. Can you please help me to code my constraint correctly? The code below is not working correctly.
N = 5
T = 3
S = 6
Cap = 15
Q = rand(1:5, N)
d = [1, 2, 2, 3, 1]
a = [6, 4, 5, 6, 4]
@variable(model, x[j=1:N, t=1:T, s=1:S], Bin)
@constraint(model, [j=1:N, t=1:T, s=d[j]:a[j]], sum(x[j,t,s] * Q[j] for j=1:N) <= Cap)
Instead of making the index set of s depend on j, keep the full index set and filter inside the sum:
N, T, S = 5, 3, 6
Q = rand(1:5, N)
d = [1, 2, 2, 3, 1]
a = [6, 4, 5, 6, 4]
using JuMP
model = Model()
@variable(model, x[1:N, 1:T, 1:S], Bin)
@constraint(
    model,
    [t = 1:T, s = 1:S],
    sum(x[j, t, s] * Q[j] for j in 1:N if d[j] <= s < a[j]) <= 15,
)
p.s. There's no need to post multiple comments and questions:
Coding arrays in constraint JuMP
You should also consider posting on the Julia discourse instead: https://discourse.julialang.org/c/domain/opt/13. It's easier to have a conversation there.

Walking through multidimensional space in a proper way

Assume I have a vector of, say, four dimensions, in which every variable lies in a specific interval. Thus we have:
Vector k = (x1,x2,x3,x4) with x1 = (-2,2), x2 = (0,2), x3 = (-4,1), x4 = (-1,1)
I am only interested in the points constrained by the intervals.
So, for example, v1 = (0,1,2,0) is important, whereas v2 = (-5,-5,5,5) is not.
In addition to that, point i+1 should be relatively close to point i along my journey. Therefore I don't want to jump around in space.
Is there a proper way of walking through those interesting points?
For example, in 2D space with x1, x2 = (-2,2), like so:
Note: the frequency of the red line could be higher
There are many ways to create a space-filling curve while preserving closeness. See the Wikipedia article for a few examples (some have associated algorithms for generating them): https://en.wikipedia.org/wiki/Space-filling_curve
Regardless, let's work with your zig-zag pattern for 2D and work on extending it to 3D and 4D. To extend it into 3D, we just add another zig to the zig-zag. Take a look at the (rough) diagram below:
Essentially, we repeat the pattern that we had in 2D but we now have multiple layers that represent the third dimension. The extra zig that we need to add is the switch between bottom-to-top and top-to-bottom every layer. This is pretty simple to abstract:
In 2D, we have x and y axes.
We move across the x domain switching between positive and negative directions most frequently.
We move across the y domain once.
In 3D, we have x, y, and z axes.
We move across the x domain switching between positive and negative directions most frequently.
We move across the y domain switching between positive and negative directions second most frequently.
We move across the z domain once.
It should be clear how this generalizes to higher dimensions. Now, I'll present some (Python 3) code that implements the zig-zag pattern for 4D. Let's represent the position in 4D space as (x, y, z, w) and the ranges in each dimension as (x0, x1), (y0, y1), (z0, z1), (w0, w1). These are our inputs. Then, we also define xdir, ydir, and zdir to keep track of the direction of the zig-zag.
x, y, z, w = x0, y0, z0, w0
xdir, ydir, zdir = +1, +1, +1
for iw in range(w1 - w0):
    for iz in range(z1 - z0):
        for iy in range(y1 - y0):
            for ix in range(x1 - x0):
                print(x, y, z, w)
                x = x + xdir
            xdir = -xdir
            print(x, y, z, w)
            y = y + ydir
        ydir = -ydir
        print(x, y, z, w)
        z = z + zdir
    zdir = -zdir
    print(x, y, z, w)
    w = w + 1
This algorithm has the guarantee that no two points printed out after each other have a distance greater than 1.
Using recursion, you can clean this up to make a very nice generalizable method. I hope this helps; let me know if you have any questions.
With the work of @Matthew Miller I implemented this generalization for any given multidimensional space:
'''assuming that we take three points out of our intervals [0,2] for a, b and c,
each of which corresponds to one dimension, i.e. a 3D space'''
a = [0, 1, 2]
b = [0, 1, 2]
c = [0, 1, 2]

vec_in = []
vec_in.append(a)
vec_in.append(b)
vec_in.append(c)

result = []
hold = []
dir = [False] * len(vec_in)

def create_points(vec, index, temp, desc):
    if desc:
        loop_x = len(vec[index]) - 1
        loop_y = -1
        loop_z = -1
    else:
        loop_x = 0
        loop_y = len(vec[index])
        loop_z = 1
    for i in range(loop_x, loop_y, loop_z):
        temp.append(vec[index][i])
        if index < (len(vec) - 1):
            create_points(vec, index + 1, temp, dir[index])
        else:
            u = []
            for k in temp:
                u.append(k)
            result.append(u)
            temp.pop()
        if dir[index] == False:
            dir[index] = True
        else:
            dir[index] = False
    if len(temp) != 0:
        temp.pop()

# render
create_points(vec_in, 0, hold, dir[0])
for x in result:
    print(x)
The result is a journey which covers every possible position in a continuous way:
[0, 0, 0]
[0, 0, 1]
[0, 0, 2]
[0, 1, 2]
[0, 1, 1]
[0, 1, 0]
[0, 2, 0]
[0, 2, 1]
[0, 2, 2]
[1, 2, 2]
[1, 2, 1]
[1, 2, 0]
[1, 1, 0]
[1, 1, 1]
[1, 1, 2]
[1, 0, 2]
[1, 0, 1]
[1, 0, 0]
[2, 0, 0]
[2, 0, 1]
[2, 0, 2]
[2, 1, 2]
[2, 1, 1]
[2, 1, 0]
[2, 2, 0]
[2, 2, 1]
[2, 2, 2]
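Picking up the recursion suggestion above, the same traversal can also be written as a compact recursive generator. This is a sketch of my own (not from the original answers); it reproduces the 3D journey above exactly:

def snake(dims):
    '''Yield grid points in boustrophedon (zig-zag) order, so that two
    consecutive points differ in exactly one coordinate by one step.'''
    if not dims:
        yield ()
        return
    tail = list(snake(dims[1:]))  # full traversal of the remaining dimensions
    for i, v in enumerate(dims[0]):
        # reverse the deeper traversal on every other step to keep the walk continuous
        for p in (tail if i % 2 == 0 else reversed(tail)):
            yield (v,) + p

for point in snake([[0, 1, 2], [0, 1, 2], [0, 1, 2]]):
    print(list(point))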

From a list of ints extract all consecutive repetitions in a list of lists

Extract all consecutive repetitions in a given list:
list1 = [1,2,2,3,3,3,3,4,5,5]
It should yield a list like this:
[[2,2],[3,3,3,3],[5,5]]
I tried the code below. I know it is not the proper way to solve this problem, but I could not work out how to solve it.
list1 = [1,2,2,3,3,3,3,4,5,5]
list2 = []
for i in list1:
    a = list1.index(i)
    if list1[a] == list1[a+1]:
        list2.append([i,i])
print(list2)
You can use this to achieve it. There are "easier" solutions using itertools.groupby to get the same result (a sketch follows after the output below); this is how to do it "by hand":
def FindInnerLists(l):
    '''reads a list of ints and groups them into lists of the same int value'''
    result = []
    allResults = []
    for n in l:
        if not result or result[0] == n:  # not result == empty list
            result.append(n)
        if result[0] != n:  # the number changed, so we copy the list over into allResults
            allResults.append(result[:])
            result = [n]  # and start a new one with the current value
    # edge case - if result contains elements, add them as the last item to allResults
    if result:
        allResults.append(result[:])
    return allResults

myl = [2, 1, 2, 1, 1, 1, 1, 2, 2, 2, 1, 2, 7, 1, 1, 1, 2, 2, 2, 2, 2]
print(FindInnerLists(myl))
Output (works for 2.6 and 3.x):
[[2], [1], [2], [1, 1, 1, 1], [2, 2, 2], [1], [2], [7], [1, 1, 1], [2, 2, 2, 2, 2]]
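For completeness, the itertools.groupby approach mentioned above can look like this (a minimal sketch; it keeps only the runs longer than one element):

from itertools import groupby

list1 = [1, 2, 2, 3, 3, 3, 3, 4, 5, 5]
result = [run for run in (list(g) for _, g in groupby(list1)) if len(run) > 1]
print(result)  # [[2, 2], [3, 3, 3, 3], [5, 5]]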
Another way to do it:
list1 = [1, 2, 2, 3, 3, 3, 3, 4, 5, 5]

result = [[object()]]  # initiate the result with object() as a placeholder
for element in list1:  # iterate over the list...
    if result[-1][0] != element:  # the last repeated element does not match the current one
        if len(result[-1]) < 2:  # if there were fewer than 2 repeated elements...
            result.pop()  # remove the last entry
        result.append([])  # create a new result entry for future repeats
    result[-1].append(element)  # add the current element to the end of the results
if len(result[-1]) < 2:  # finally, if the last element wasn't repeated...
    result.pop()  # remove it

print(result)  # [[2, 2], [3, 3, 3, 3], [5, 5]]
And you can use it on any kind of list, not just a numerical one.
This would work:
list1 = [1,2,2,3,3,3,3,4,5,5]
res = []
add = True
last = [list1[0]]
for elem in list1[1:]:
    if last[-1] == elem:
        last.append(elem)
        if add:
            res.append(last)
            add = False
    else:
        add = True
        last = [elem]
print(res)
Output:
[[2, 2], [3, 3, 3, 3], [5, 5]]

How to do outer product as a layer with chainer?

How can I include an outer product (of the previous feature vector and itself) as a layer in chainer, especially in a way that's compatible with batching?
F.matmul is also very handy.
Depending on the input shapes, you can combine it with F.expand_dims (of course F.reshape works, too) or use the transa/transb arguments.
For details, refer to the official documentation of these functions.
Code
import chainer.functions as F
import numpy as np

print("---")
x = np.array([[[1], [2], [3]], [[4], [5], [6]]], 'f')
y = np.array([[[1, 2, 3]], [[4, 5, 6]]], 'f')
print(x.shape)
print(y.shape)
z = F.matmul(x, y)
print(z)

print("---")
x = np.array([[[1], [2], [3]], [[4], [5], [6]]], 'f')
y = np.array([[[1], [2], [3]], [[4], [5], [6]]], 'f')
print(x.shape)
print(y.shape)
z = F.matmul(x, y, transb=True)
print(z)

print("---")
x = np.array([[1, 2, 3], [4, 5, 6]], 'f')
y = np.array([[1, 2, 3], [4, 5, 6]], 'f')
print(x.shape)
print(y.shape)
z = F.matmul(
    F.expand_dims(x, -1),
    F.expand_dims(y, -1),
    transb=True)
print(z)
Output
---
(2, 3, 1)
(2, 1, 3)
variable([[[  1.   2.   3.]
           [  2.   4.   6.]
           [  3.   6.   9.]]

          [[ 16.  20.  24.]
           [ 20.  25.  30.]
           [ 24.  30.  36.]]])
---
(2, 3, 1)
(2, 3, 1)
variable([[[  1.   2.   3.]
           [  2.   4.   6.]
           [  3.   6.   9.]]

          [[ 16.  20.  24.]
           [ 20.  25.  30.]
           [ 24.  30.  36.]]])
---
(2, 3)
(2, 3)
variable([[[  1.   2.   3.]
           [  2.   4.   6.]
           [  3.   6.   9.]]

          [[ 16.  20.  24.]
           [ 20.  25.  30.]
           [ 24.  30.  36.]]])
You can use F.reshape and F.broadcast_to to handle the arrays explicitly.
Assume you have a 2-dim array h with shape (minibatch, feature).
If you want to calculate the outer product of h with itself, try the code below.
Is this what you want to do?
import numpy as np
from chainer import functions as F

def outer_product(h):
    s0, s1 = h.shape
    h1 = F.reshape(h, (s0, s1, 1))
    h1 = F.broadcast_to(h1, (s0, s1, s1))
    h2 = F.reshape(h, (s0, 1, s1))
    h2 = F.broadcast_to(h2, (s0, s1, s1))
    h_outer = h1 * h2
    return h_outer

# test code
h = np.arange(12).reshape(3, 4).astype(np.float32)
h_outer = outer_product(h)
print(h.shape)
print(h_outer.shape, h_outer.data)
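As a quick sanity check (my addition, not part of the original answer), the same batched outer product can be verified against numpy's einsum, reusing the outer_product function from above:

import numpy as np

h = np.arange(12).reshape(3, 4).astype(np.float32)
expected = np.einsum('bi,bj->bij', h, h)  # batched outer product, shape (3, 4, 4)
print(np.allclose(outer_product(h).data, expected))  # should print True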
