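# Single-frame CNN training script for UCF-101 action recognition (TensorFlow 0.x, Python 2).
# Frames listed in the train/test text files below are read with OpenCV, resized to 227x227,
# normalized, and fed through an AlexNet-style network (5 conv + 2 fully connected layers,
# 101 output classes) trained with RMSProp. Dataset and log paths are specific to the
# original author's machine.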
import os
import sys
import numpy as np
import tensorflow as tf
import random
import basic_cnn_model
import pickle
import cv2
from scipy import stats
import math


global g_train_files1, g_train_classes, g_test_files1, g_test_classes, g_val_files1, g_val_classes, g_val_files2, g_test_files2, g_train_files2

############################ Params ################################################
learning_rate = 0.009
log_dir = '/media/anilil/Data/charm/Cnn/Output_logs/xml_ucf_cnn/'
batch_size = 64
training_iters = 70000
n_classes = 101
data_root = '/media/anilil/Data/Datasets/UCf_scales/ucf_xmlparse/MV_UCF101_H264_CRF18_GOP30/'
display_step = 100
############################ Params ################################################

def load_from_file(name):
    # Load a pickled object from disk.
    with open(name, "rb") as f:
        dictname = pickle.load(f)
    return dictname


def keep_png(path, listr):
    # Keep only entries that exist on disk and end in '.jpg'.
    ll = list()
    for x in listr:
        if os.path.isfile(path + str(x)) and str(x).endswith('.jpg'):
            ll.append(x)
    return ll

def read_from_tex(filename):
    # Each line: "<file1>,<file2>,<class>" (comma-separated).
    data1 = list()
    data2 = list()
    classes = list()
    with open(filename, 'r') as f:
        for each_line in f:
            x = str(each_line).replace('\n', '').replace('\r', '').split(',')
            data1.append(x[0])
            data2.append(x[1])
            classes.append(x[2])
    return data1, data2, classes

def readfiles_fromtext(filename):
    # Each line: "<frame path> <class index>" (space-separated).
    data1 = list()
    classes = list()
    with open(filename, 'r') as f:
        for each_line in f:
            x = str(each_line).replace('\n', '')
            data1.append(x.split(' ')[0])
            classes.append(x.split(' ')[1])
    return data1, classes

def train_loadbatch_from_lists(b_siz, pos):
    # Wrap around when the requested window runs past the end of the training list.
    if pos + b_siz > len(g_train_classes):
        pos = pos % len(g_train_classes)
    files1, classes = g_train_files1[pos:pos + b_siz], g_train_classes[pos:pos + b_siz]
    temp_batch = np.zeros(shape=(b_siz, 227, 227, 3), dtype=np.float32)
    temp_classes = np.zeros(shape=(b_siz, 101), dtype=np.float32)
    for x in range(0, b_siz):
        temp_batch[x, :, :, :] = cv2.resize(cv2.imread(files1[x]), (227, 227))
        temp_classes[x, :] = create_one_hot(int(classes[x]))
    temp_batch = (temp_batch - 127) / 127  # scale pixels to roughly [-1, 1]
    return temp_batch, temp_classes

def replace_root(listw):
    x = list()
    for each in listw:
        x.append(str(each).replace('/media/anilil/Data/Datasets/UCf_scales/ucf_xmlparse/MV_UCF101_H264_CRF18_GOP30/', data_root))
    return x

def test_loadbatch_from_lists(b_siz, pos):
    # Same as train_loadbatch_from_lists, but drawn from the test split.
    if pos + b_siz > len(g_test_classes):
        pos = pos % len(g_test_classes)
    files1, classes = g_test_files1[pos:pos + b_siz], g_test_classes[pos:pos + b_siz]
    temp_batch = np.zeros(shape=(b_siz, 227, 227, 3), dtype=np.float32)
    temp_classes = np.zeros(shape=(b_siz, 101), dtype=np.float32)
    for x in range(0, b_siz):
        temp_batch[x, :, :, :] = cv2.resize(cv2.imread(files1[x]), (227, 227))
        temp_classes[x, :] = create_one_hot(int(classes[x]))
    temp_batch = (temp_batch - 127) / 127
    return temp_batch, temp_classes


def create_one_hot(num, len1=101):
    tem = np.zeros(shape=(len1))
    tem[int(num)] = 1.0
    return tem

def shuffle_list(list1, list2, num=3):
    # Shuffle two lists in unison so file/label pairs stay aligned (Python 2: zip returns a list).
    combined = zip(list1, list2)
    for x in range(0, num):
        random.shuffle(combined)
    list1, list2 = zip(*combined)
    return list1, list2

def load_model(log_dir):
    # Restore the most recent checkpoint from log_dir (uses the module-level saver and sess).
    ckpt = tf.train.get_checkpoint_state(log_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)


# Store layer weights & biases
weights = {
    'wc1': tf.Variable(tf.random_normal([7, 7, 3, 96], stddev=0.01)),
    'wc2': tf.Variable(tf.random_normal([5, 5, 96, 384], stddev=0.01)),
    'wc3': tf.Variable(tf.random_normal([3, 3, 384, 512], stddev=0.01)),
    'wc4': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5': tf.Variable(tf.random_normal([3, 3, 512, 384], stddev=0.01)),
    'wd1': tf.Variable(tf.random_normal([2 * 2 * 384, 4096], stddev=0.01)),
    'wd2': tf.Variable(tf.random_normal([4096, 4096], stddev=0.01)),
    'out': tf.Variable(tf.random_normal([4096, 101], stddev=0.01))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([96])),
    'bc2': tf.Variable(tf.random_normal([384])),
    'bc3': tf.Variable(tf.random_normal([512])),
    'bc4': tf.Variable(tf.random_normal([512])),
    'bc5': tf.Variable(tf.random_normal([384])),
    'bd1': tf.Variable(tf.random_normal([4096])),
    'bd2': tf.Variable(tf.random_normal([4096])),
    'out': tf.Variable(tf.random_normal([101]))
}

def conv2d(name, l_input, w, b, stride=1):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, stride, stride, 1], padding='VALID'), b), name=name)


def max_pool(name, l_input, k, stride=1):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding='VALID', name=name)


def norm(name, l_input, lsize=5):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001, beta=0.75, name=name)

def cnn_model(_X, _weights, _biases, _dropout):

    # 1st Convolution + ReLU layer (stride 2)
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'], stride=2)
    # Max pooling (down-sampling)
    pool1 = max_pool('pool1', conv1, k=3, stride=2)
    # Apply normalization
    norm1 = norm('norm1', pool1, lsize=5)

    # 2nd Convolution + ReLU layer
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'], stride=2)
    # Max pooling (down-sampling)
    pool2 = max_pool('pool2', conv2, k=3, stride=2)
    # Apply normalization
    norm2 = norm('norm2', pool2, lsize=5)

    # 3rd convolution layer
    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
    # 4th convolution layer
    conv4 = conv2d('conv4', conv3, _weights['wc4'], _biases['bc4'])
    # 5th convolution layer
    conv5 = conv2d('conv5', conv4, _weights['wc5'], _biases['bc5'])
    # Pooling
    pool5 = max_pool('pool5', conv5, k=3, stride=2)

    # Fully connected layer
    dense1 = tf.reshape(pool5, [-1, _weights['wd1'].get_shape().as_list()[0]])  # Flatten pool5 output to fit the dense layer input
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')  # ReLU activation
    # Apply dropout
    dense1 = tf.nn.dropout(dense1, _dropout)

    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')  # ReLU activation
    # Apply dropout
    dense2 = tf.nn.dropout(dense2, _dropout)

    # Output, class prediction (logits)
    out = tf.matmul(dense2, _weights['out']) + _biases['out']

    return out

g_train_files1, g_train_classes = readfiles_fromtext('/home/anilil/projects/lstm/lisa-caffe-public/examples/LRCN_activity_recognition/singleframe_flow/ucf_xml_train_list.txt')
g_test_files1, g_test_classes = readfiles_fromtext('/home/anilil/projects/lstm/lisa-caffe-public/examples/LRCN_activity_recognition/singleframe_flow/ucf_xml_test_list.txt')

x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y = tf.placeholder(tf.float32, [None, 101])
keep_prob = tf.placeholder(tf.float32)

pred = cnn_model(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))  # Softmax cross-entropy loss
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)  # RMSProp optimizer

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
Train_loss_summ = tf.scalar_summary("Train_Loss", cost)
Train_acc_summ = tf.scalar_summary("Train_Accuracy", accuracy)
test_loss_sum = tf.scalar_summary("Test_Loss", cost)
test_accuracy_sum = tf.scalar_summary("Test_Accuracy", accuracy)
merged_summary_op = tf.merge_all_summaries()
saver = tf.train.Saver()


if not os.path.isdir(log_dir):
    os.makedirs(log_dir)
# Initializing the variables
init = tf.initialize_all_variables()

# Record the run configuration at the top of the log file.
f = open(log_dir + 'log.log', 'w')
f.write("Learning Rate : {} \n".format(learning_rate))
f.write("Batch Size : {} \n".format(batch_size))
f.write("Total number of training iterations to run : {} \n".format(training_iters))
f.write("Data from path : {}\n".format(data_root))
f.write("Number of classes : {}\n".format(n_classes))
f.write("Type of Optimizer : RMSPropOptimizer \n")
f.close()

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)


with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(init)
    step = 0
    epoch = 0
    temp = 1
    summary_writer = tf.train.SummaryWriter(log_dir, graph_def=sess.graph_def)
    # Keep training until the maximum number of iterations is reached.
    while step * batch_size < training_iters:

        batch_xs, batch_ys = train_loadbatch_from_lists(batch_size, step * batch_size)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
        if step % display_step == 0:
            # Calculate accuracy and loss on the current training batch.
            tr_acc_sum, tr_loss_sum, acc, loss = sess.run([Train_acc_summ, Train_loss_summ, accuracy, cost],
                                                          feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})

            # Evaluate one batch from the test split (dropout disabled).
            test_data, test_label = test_loadbatch_from_lists(batch_size, step * batch_size)
            test_acc_sum, test_los_sum, test_acc, test_cost = sess.run([test_accuracy_sum, test_loss_sum, accuracy, cost],
                                                                       feed_dict={x: test_data, y: test_label, keep_prob: 1})

            summary_writer.add_summary(tr_acc_sum, step * batch_size + 1)
            summary_writer.add_summary(tr_loss_sum, step * batch_size + 1)
            summary_writer.add_summary(test_acc_sum, step * batch_size + 1)
            summary_writer.add_summary(test_los_sum, step * batch_size + 1)

            f = open(log_dir + 'log.log', 'a')
            f.write("Iter={:10d},Train_Loss={:.6f},Train_Accuracy={:.6f},Test_Loss={:.6f},Test_Accuracy={:.6f} \n".format(step * batch_size, loss, acc, test_cost, test_acc))
            f.close()
            print "Iter={:10d},Train_Loss={:.6f},Train_Accuracy={:.6f},Test_Loss={:.6f},Test_Accuracy={:.6f} \n".format(step * batch_size, loss, acc, test_cost, test_acc)

        step += 1
        temp = temp + batch_size
        # Roughly every 15000 samples, count an epoch and save a checkpoint.
        if temp > 15000:
            temp = 1
            epoch = epoch + 1
            saver.save(sess, log_dir + 'model.ckpt', global_step=step * batch_size + 1)

    print ("Optimization Finished!")