import tensorflow as tf
import numpy as np
from tensorflow.keras import backend as K
def pull_away_loss(g):
    # pull-away term: mean squared cosine similarity over all ordered pairs
    # of distinct rows, (1 / (N * (N - 1))) * sum_{i != j} cos(g_i, g_j)^2
    # flatten each sample to a vector so the pairwise computation below
    # also works for the 3-D sequence outputs used further down
    g = tf.reshape(g, [tf.shape(g)[0], -1])
    # normalization (vectorized): scale each row to unit L2 norm
    Nor = tf.norm(g, axis=1)
    Nor_mat = tf.tile(tf.expand_dims(Nor, axis=1), [1, tf.shape(g)[1]])
    X = tf.divide(g, Nor_mat)
    # squared cosine similarity between every pair of rows
    X_X = tf.square(tf.matmul(X, tf.transpose(X)))
    # mask with zeros on the diagonal (i.e. keep only pairs where i != j)
    mask = tf.subtract(tf.ones_like(X_X),
                       tf.linalg.diag(tf.ones([tf.shape(X_X)[0]])))
    # multiply: zero out the diagonal with the mask
    # reduce_sum: sum the remaining pairwise similarities
    # divide by N * (N - 1), as in the formula above
    pt_loss = tf.divide(tf.reduce_sum(tf.multiply(X_X, mask)),
                        tf.multiply(
                            tf.cast(tf.shape(X_X)[0], tf.float32),
                            tf.cast(tf.shape(X_X)[0] - 1, tf.float32)))
    return pt_loss
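
# A quick, hedged sanity check of pull_away_loss (an addition, not part of
# the original paste); assumes TF 2.x eager execution. Orthogonal rows
# should give a pull-away term of ~0, identical rows ~1.
if __name__ == '__main__':
    _g_orth = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    _g_same = tf.constant([[1.0, 0.0], [1.0, 0.0]])
    print(float(pull_away_loss(_g_orth)))  # ~0.0
    print(float(pull_away_loss(_g_same)))  # ~1.0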
_EPSILON = K.epsilon()

def _loss_tensor(y_true, y_pred):
    # binary cross-entropy, scaled by the pull-away term of the predictions
    y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    out = -(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))
    return K.mean(out) * pull_away_loss(y_pred)
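
# Worked example (an addition, assuming TF 2.x eager execution): with
# y_true == y_pred == 0.5 everywhere, the mean cross-entropy is log(2)
# and identical rows give a pull-away term of 1, so the loss is ~0.693.
if __name__ == '__main__':
    _y = tf.constant([[0.5, 0.5], [0.5, 0.5]])
    print(float(_loss_tensor(_y, _y)))  # ~0.693 = log(2) * 1.0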
def _loss_np(y_true, y_pred):
    # NumPy reference for the cross-entropy part only (no pull-away factor)
    y_pred = np.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    out = -(y_true * np.log(y_pred) + (1.0 - y_true) * np.log(1.0 - y_pred))
    return np.mean(out, axis=-1)
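
# Tiny worked check for the reference (an addition): a confident wrong
# prediction is penalized more than an uncertain one, e.g.
# _loss_np(np.array([1.0]), np.array([0.5])) ≈ 0.693 (= -log 0.5), while
# _loss_np(np.array([1.0]), np.array([0.9])) ≈ 0.105 (= -log 0.9).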
def check_loss(_shape):
    if _shape == '2d':
        shape = (6, 7)
    elif _shape == '3d':
        shape = (5, 6, 7)
    elif _shape == '4d':
        shape = (8, 5, 6, 7)
    elif _shape == '5d':
        shape = (9, 8, 5, 6, 7)
    y_a = np.random.random(shape)
    y_b = np.random.random(shape)
    out1 = K.eval(_loss_tensor(K.variable(y_a), K.variable(y_b)))
    print(out1)
    # The comparison below is disabled: _loss_tensor returns a scalar
    # (mean cross-entropy times the pull-away term), so it no longer
    # matches the per-sample cross-entropy computed by _loss_np.
    # out2 = _loss_np(y_a, y_b)
    # assert out1.shape == out2.shape
    # assert out1.shape == shape[:-1]
    # print(np.linalg.norm(out1))
    # print(np.linalg.norm(out2))
    # print(np.linalg.norm(out1 - out2))
def test_loss():
    shape_list = ['2d']
    for _shape in shape_list:
        check_loss(_shape)
        print('======================')

if __name__ == '__main__':
    test_loss()
import tensorflow as tf
from tensorflow.keras.layers import Dense, LSTM, Masking, TimeDistributed, RepeatVector
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
# (samples, timesteps, features): samples=4, features=3, timesteps of variable length
train_X = [
    [[0, 1, 2], [9, 8, 7]],
    [[3, 4, 5]],
    [[6, 7, 8], [6, 5, 4]],
    [[9, 0, 1], [3, 7, 4]]
]
# labels reshaped to (samples, n_out, 1) to match decoder2's output shape
train_Y = np.array([0, 1, 1, 0]).reshape((4, 1, 1))
n_in = 3
n_feat = 3
n_out = 1
# padding: pad every sequence to n_in timesteps (not just to the longest
# sequence in the batch), e.g.
'''
train_X = np.array([
    [[0, 1, 2], [9, 8, 7], [3, 6, 8]],
    [[3, 4, 5], [0, 0, 0], [0, 0, 0]],
    [[6, 7, 8], [6, 5, 4], [1, 7, 4]],
    [[9, 0, 1], [3, 7, 4], [0, 0, 0]]
])
'''
train_X = pad_sequences(train_X, maxlen=n_in, padding='post')
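# Shape check (an addition): padding yields a dense (4, 3, 3) int array,
# matching the Input(shape=(n_in, n_feat)) declared below.
print(train_X.shape)  # (4, 3, 3)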
inputs = tf.keras.Input(shape=(n_in, n_feat))
# Masking: skip timesteps whose features are all equal to mask_value
masked_input = Masking(mask_value=0)(inputs)
# encoder
encoder = LSTM(100, activation='relu')(masked_input)
# decoder 1: reconstruct the input sequence
decoder1 = RepeatVector(n_in)(encoder)
decoder1 = LSTM(100, activation='relu', return_sequences=True)(decoder1)
decoder1 = TimeDistributed(Dense(n_feat))(decoder1)
# decoder 2: predict the output sequence
decoder2 = RepeatVector(n_out)(encoder)
decoder2 = LSTM(100, activation='relu', return_sequences=True)(decoder2)
decoder2 = TimeDistributed(Dense(1))(decoder2)
model = tf.keras.Model(inputs=inputs, outputs=[decoder1, decoder2])
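# One encoder feeding two decoder heads: decoder1 reconstructs the padded
# input sequence (n_in steps of n_feat features) and decoder2 emits the
# n_out-step label sequence; both heads are trained jointly below.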
# reuse the custom cross-entropy * pull-away loss defined above for both heads
model.compile(optimizer='rmsprop', loss=_loss_tensor)
# print(model.summary())
model.fit(train_X, [train_X, train_Y], epochs=100, verbose=2)
yhat = model.predict(train_X, verbose=0)
print(yhat)
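# yhat is a list with one array per output head:
# yhat[0] has shape (4, 3, 3) (reconstructed sequences),
# yhat[1] has shape (4, 1, 1) (predicted labels).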
print('==========================================')