Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
### Matrix Factorization + Bias ###
# Recommender model: score(user, item) =
#   sigmoid(Dense(flatten(u * v) + b_u + b_i))
# where u, v are learned latent-factor vectors and b_u, b_i are learned
# per-user / per-item scalar biases (classic biased MF, trained end-to-end).

# Input variables: each sample is one (user id, product id) integer pair.
mfb_user_inp = Input(shape=(1,), dtype='int32', name='user_inp')
mfb_prod_inp = Input(shape=(1,), dtype='int32', name='prod_inp')

# Latent-factor embeddings, lightly L2-regularized to curb overfitting.
# Output shape per sample: (1, latent_dim) — the length-1 axis comes from
# input_length=1 and is dropped by the Flatten below.
mfb_User_Embedding = Embedding(input_dim=n_users, output_dim=latent_dim,
                               name='user_embedding', input_length=1,
                               embeddings_regularizer=regularizers.l2(1e-5))(mfb_user_inp)
mfb_Prod_Embedding = Embedding(input_dim=n_prods, output_dim=latent_dim,
                               name='item_embedding', input_length=1,
                               embeddings_regularizer=regularizers.l2(1e-5))(mfb_prod_inp)

# Per-user / per-item bias terms (output_dim=1 embeddings).
# Crucial to flatten an embedding vector! (drops the length-1 sequence axis)
mfb_user_bias = Flatten()(Embedding(n_users, 1, input_length=1)(mfb_user_inp))
mfb_prod_bias = Flatten()(Embedding(n_prods, 1, input_length=1)(mfb_prod_inp))

# Element-wise product of user and item embeddings.
# NOTE: the Keras 1 `merge([...], mode='mul')` function was removed in Keras 2;
# the Multiply layer is the equivalent replacement (this file already uses the
# Keras 2 `keras.layers.Add`, so the APIs must not be mixed).
mfb_predict_vector = keras.layers.Multiply()([mfb_User_Embedding, mfb_Prod_Embedding])
mfb_predict_vector = Flatten()(mfb_predict_vector)
# Add the scalar biases (broadcast across the latent vector, as before).
mfb_predict_vector = keras.layers.Add()([mfb_predict_vector, mfb_user_bias])
mfb_predict_vector = keras.layers.Add()([mfb_predict_vector, mfb_prod_bias])

# Final prediction layer squashes to (0, 1).
# Keras 2 spelling: `kernel_initializer` replaced the removed `init` keyword.
mfb_prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform',
                       name='prediction')(mfb_predict_vector)

# Keras 2 spelling: `inputs`/`outputs` replaced the removed singular keywords.
MF_model_bias = Model(inputs=[mfb_user_inp, mfb_prod_inp],
                      outputs=mfb_prediction)
Add Comment
Please sign in to add a comment.