Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- from __future__ import print_function
- import tensorflow as tf
- from tensorflow.contrib import rnn
- import csv
- import bisect
- import glob
- import re
- import numpy as np
- import random
- import data
- import config
# Load hyper-parameters and the preprocessed graph dataset.
cfg = config.Config()
graph_data = data.Graph_data(cfg)

# TF1-style feed placeholders: each row is a length-N_movies float vector.
# NOTE(review): presumably a per-user movie interaction/label vector —
# confirm against data.Graph_data.
X = tf.placeholder("float", [None, cfg.N_movies])
Y = tf.placeholder("float", [None, cfg.N_movies])
def Dense(x):
    """Three-layer fully-connected network.

    Maps x through two ReLU hidden layers (500 and 50 units) to a
    softmax distribution over cfg.N_movies classes.
    """
    hidden1 = tf.layers.dense(inputs=x, units=500, activation=tf.nn.relu)
    hidden2 = tf.layers.dense(inputs=hidden1, units=50, activation=tf.nn.relu)
    return tf.layers.dense(inputs=hidden2, units=cfg.N_movies,
                           activation=tf.nn.softmax)
# Build the model and the training graph.
# NOTE: despite the name, `logits` holds softmax probabilities, not raw logits.
logits = Dense(X)

# Per-example cross-entropy H(Y, p) = -sum_i Y_i * log(p_i), reduced over
# the class axis. Clip the probabilities away from 0 before the log:
# tf.log(0) yields -inf, which turns the loss into NaN as soon as the
# softmax saturates. (The cleaner fix — having Dense return raw logits and
# using tf.nn.softmax_cross_entropy_with_logits — would change Dense's
# interface, so we clip instead.)
cross_entropy = tf.reduce_sum(
    -Y * tf.log(tf.clip_by_value(logits, 1e-10, 1.0)), 1)
loss_op = tf.reduce_mean(cross_entropy)

# Plain SGD on the mean cross-entropy.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=cfg.learning_rate)
train_op = optimizer.minimize(loss_op)

# Initializer op for all graph variables; run once per session.
init = tf.global_variables_initializer()
# Training loop: SGD over mini-batches, with periodic loss logging.
with tf.Session() as sess:
    sess.run(init)
    for step in range(1, cfg.training_steps + 1):
        batch_x, batch_y = graph_data.train_next_batch(cfg.batch_size)
        feed = {X: batch_x, Y: batch_y}
        sess.run(train_op, feed_dict=feed)
        # Report the current batch loss at step 1 and every display_step steps.
        if step == 1 or step % cfg.display_step == 0:
            loss = sess.run(loss_op, feed_dict=feed)
            print("loss = ", loss)
Add Comment
Please sign in to add a comment.