import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (tf.placeholder / tf.layers)

WINDOW = 30            # Timesteps per input window
n_channels = 5         # Input channels per timestep
n_classes = 2          # Output classes
batch_size = 100       # Batch size
learning_rate = 0.0001
epochs = 10000

graph = tf.Graph()

# Construct placeholders
with graph.as_default():
    inputs_ = tf.placeholder(tf.float32, [None, WINDOW, n_channels], name='inputs')
    labels_ = tf.placeholder(tf.float32, [None, n_classes], name='labels')
    keep_prob_ = tf.placeholder(tf.float32, name='keep')
    learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')
    # Batch-norm mode flag: True while training, False at evaluation time
    is_training_ = tf.placeholder(tf.bool, name='is_training')
with graph.as_default():
    # Layer-1
    conv = tf.layers.conv1d(inputs=inputs_, filters=32, kernel_size=3, strides=1, padding='valid', activation=tf.nn.relu)
    conv = tf.layers.batch_normalization(conv, training=is_training_)

    # Layer-2
    conv = tf.layers.conv1d(inputs=conv, filters=64, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv, training=is_training_)

    # Layer-3
    conv = tf.layers.conv1d(inputs=conv, filters=128, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv, training=is_training_)

    # Layer-4
    conv = tf.layers.conv1d(inputs=conv, filters=256, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv, training=is_training_)

    # Layer-5
    conv = tf.layers.conv1d(inputs=conv, filters=256, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv, training=is_training_)

    # Layer-6
    conv = tf.layers.conv1d(inputs=conv, filters=256, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv, training=is_training_)
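# Shape check: each 'valid' conv1d with kernel_size=3 and stride=1 trims 2 timesteps,
# so six layers leave WINDOW - 6*2 = 30 - 12 = 18 steps of 256 filters, and the
# flatten below yields 18 * 256 = 4608 features per example.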
with graph.as_default():
    # Flatten and add dropout (tf.nn.dropout's `rate` is the drop probability, i.e. 1 - keep_prob)
    flat = tf.layers.flatten(conv)
    flat = tf.nn.dropout(flat, rate=1 - keep_prob_)

    dense = tf.layers.dense(inputs=flat, units=256, activation=tf.nn.relu)
    dense = tf.nn.dropout(dense, rate=1 - keep_prob_)
    dense = tf.layers.dense(inputs=dense, units=256, activation=tf.nn.relu)
    dense = tf.nn.dropout(dense, rate=1 - keep_prob_)

    # Predictions: logits come from the dense stack (feeding `flat` here would
    # leave the two dense layers above as dead code)
    logits = tf.layers.dense(dense, n_classes)

    # Cost function and optimizer; batch norm's moving-average updates live in
    # UPDATE_OPS and must run alongside each training step
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels_))
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost)

    # Accuracy
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
validation_acc = []
validation_loss = []
train_acc = []
train_loss = []

with graph.as_default():
    saver = tf.train.Saver()
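# The paste assumes pre-split arrays X_tr / y_tr (train) and X_vld / y_vld (validation)
# with inputs shaped [N, WINDOW, n_channels] and one-hot labels shaped [N, n_classes],
# plus a get_batches helper it never defines. A minimal sketch of such a generator
# (an assumption, not part of the original paste):
def get_batches(X, y, batch_size):
    """Yield consecutive (inputs, labels) mini-batches, dropping any final partial batch."""
    n_batches = len(X) // batch_size
    X, y = X[:n_batches * batch_size], y[:n_batches * batch_size]
    for b in range(0, len(X), batch_size):
        yield X[b:b + batch_size], y[b:b + batch_size]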
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    iteration = 1

    for e in range(epochs):
        # Loop over batches
        for x, y in get_batches(X_tr, y_tr, batch_size):
            # Feed dictionary (keep_prob 0.2 keeps 20% of units, i.e. a 0.8 drop rate)
            feed = {inputs_: x, labels_: y, keep_prob_: 0.2, learning_rate_: learning_rate, is_training_: True}

            # Train step and loss
            loss, _, acc = sess.run([cost, optimizer, accuracy], feed_dict=feed)
            train_acc.append(acc)
            train_loss.append(loss)

            # Print every 50 iterations
            if iteration % 50 == 0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {:d}".format(iteration),
                      "Train loss: {:.6f}".format(loss),
                      "Train acc: {:.6f}".format(acc))

            # Compute validation metrics every 100 iterations
            if iteration % 100 == 0:
                val_acc_ = []
                val_loss_ = []

                for x_v, y_v in get_batches(X_vld, y_vld, batch_size):
                    # Feed (no dropout, batch norm in inference mode)
                    feed = {inputs_: x_v, labels_: y_v, keep_prob_: 1.0, is_training_: False}

                    # Loss
                    loss_v, acc_v = sess.run([cost, accuracy], feed_dict=feed)
                    val_acc_.append(acc_v)
                    val_loss_.append(loss_v)

                # Print info
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {:d}".format(iteration),
                      "Validation loss: {:.6f}".format(np.mean(val_loss_)),
                      "Validation acc: {:.6f}".format(np.mean(val_acc_)))

                # Store
                validation_acc.append(np.mean(val_acc_))
                validation_loss.append(np.mean(val_loss_))

            # Iterate
            iteration += 1

    # Save the trained model (the checkpoints-cnn directory must already exist)
    saver.save(sess, "checkpoints-cnn/har.ckpt")
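# A minimal evaluation sketch, assuming held-out arrays X_test / y_test with the same
# shapes as above (these names are not in the original paste): restore the latest
# checkpoint and average accuracy over test batches.
test_acc = []

with tf.Session(graph=graph) as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints-cnn'))

    for x_t, y_t in get_batches(X_test, y_test, batch_size):
        feed = {inputs_: x_t, labels_: y_t, keep_prob_: 1.0, is_training_: False}
        test_acc.append(sess.run(accuracy, feed_dict=feed))

    print("Test accuracy: {:.6f}".format(np.mean(test_acc)))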