Year_Month_Day,Hour_Minute,Temperature,Relative_humidity,Pressure,Total_Precipitation,Snowfall_amount,Total_cloud_cover,High_cloud_cover,Medium_cloud_cover,Low_cloud_cover,Shortwave_Radiation,Wind_speed_10m,Wind_direction_10m,Wind_speed_80m,Wind_direction_80m,Wind_speed_900m,Wind_direction_900m,Wind_Gust_10m,Difference
2016-10-24,23.00,15.47,76.00,1015.40,0.00,0.00,100.00,26.00,100.00,100.00,0.00,6.88,186.01,12.26,220.24,27.60,262.50,14.04,2.1
2016-10-24,22.00,16.14,73.00,1014.70,0.00,0.00,10.20,34.00,0.00,2.00,0.00,6.49,176.82,11.97,201.16,24.27,249.15,7.92,0.669999
.....
.....
.....
2016-10-24,18.00,20.93,56.00,1012.20,0.00,0.00,100.00,48.00,15.00,100.00,91.67,6.49,146.31,12.10,149.62,17.65,163.41,8.64,1.65
2016-10-24,17.00,21.69,50.00,1012.10,0.00,0.00,100.00,42.00,10.00,100.00,243.86,9.50,142.70,12.77,139.57,19.08,144.21,32.40,0.76

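A minimal loading sketch for the rows above (assuming the file is saved as tuna.csv, as in the script below; the combined datetime index is an illustration only, the script itself ignores the timestamp columns):

import pandas

df = pandas.read_csv("tuna.csv")
# Combine the date column with the hour column into one timestamp.
# Hour_Minute values like 23.00 are read as floats, so cast to whole hours.
df.index = pandas.to_datetime(df["Year_Month_Day"]) + pandas.to_timedelta(df["Hour_Minute"].astype(int), unit="h")
print(df[["Temperature", "Relative_humidity"]].head())
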
import tensorflow as tf
import pandas
from sklearn import cross_validation
from sklearn import preprocessing
from sklearn import metrics

sess = tf.InteractiveSession()

data = pandas.read_csv("tuna.csv")
print(data[-2:])
#X=data.copy(deep=True)

# 16 weather columns as inputs, Temperature as the target
X = data[['Relative_humidity','Pressure','Total_Precipitation','Snowfall_amount','Total_cloud_cover','High_cloud_cover','Medium_cloud_cover','Low_cloud_cover','Shortwave_Radiation','Wind_speed_10m','Wind_direction_10m','Wind_speed_80m','Wind_direction_80m','Wind_speed_900m','Wind_direction_900m','Wind_Gust_10m']].fillna(0)
Y = data[['Temperature']]

number_of_samples = X.shape[0]
elements_of_one_sample = X.shape[1]

print("number of samples", number_of_samples)
print("elements_of_one_sample", elements_of_one_sample)

train_x, test_x, train_y, test_y = cross_validation.train_test_split(X, Y, test_size=0.1, random_state=42)

print("train_x.shape=", train_x.shape)
print("train_y.shape=", train_y.shape)
print("test_x.shape=", test_x.shape)
print("test_y.shape=", test_y.shape)

epoch = 0          # counter for number of rounds training the network
last_cost = 0      # keep track of last cost to measure the difference
max_epochs = 2000  # total number of training sessions
tolerance = 1e-6   # we stop when the difference in costs is less than this
batch_size = 50    # we batch the data in groups of this size
num_samples = train_y.shape[0]               # number of samples in the training set
num_batches = int(num_samples / batch_size)  # number of full batches, given the batch size
print("############################## num_samples", num_samples)
print("############################## num_batches", num_batches)
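# Note: integer division drops the trailing partial batch. With the sizes
# printed in the run log at the end of this paste (1125 training samples,
# batch_size 50), that is 22 full batches, leaving 25 samples unused per pass.
# A small self-contained check of that arithmetic:
assert int(1125 / 50) == 22
assert 1125 - 22 * 50 == 25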

x = tf.placeholder(tf.float32, shape=[None, 16])
y_ = tf.placeholder(tf.float32, shape=[None, 1])

# xW + b
W = tf.Variable(tf.zeros([16, 1]))
b = tf.Variable(tf.zeros([1]))

sess.run(tf.initialize_all_variables())

# y = softmax(xW + b)
y = tf.nn.softmax(tf.matmul(x, W) + b)

# loss is cross entropy
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# one pass over the training set, one mini-batch at a time
for n in range(num_batches):
    batch_x = train_x[n * batch_size : (n + 1) * batch_size]
    batch_y = train_y[n * batch_size : (n + 1) * batch_size]
    train_step.run(feed_dict={x: batch_x, y_: batch_y})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: test_x, y_: test_y}))
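
# Note: with a single output column, tf.argmax(..., 1) is always 0 for both
# y and y_, so correct_prediction is always True and the printed accuracy is
# 1.0 regardless of the fit; softmax over a single logit is likewise always
# exactly 1.0. A minimal sketch of both effects:
single_logit = tf.constant([[3.7], [-1.2]])
print(sess.run(tf.nn.softmax(single_logit)))   # [[1.], [1.]] -- softmax of one value
print(sess.run(tf.argmax(single_logit, 1)))    # [0, 0] -- only one column to pick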

# To create this model, we're going to need to create a lot of weights and biases.
# One should generally initialize weights with a small amount of noise for symmetry
# breaking, and to prevent 0 gradients.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Since we're using ReLU neurons, it is also good practice to initialize them
# with a slightly positive initial bias to avoid "dead neurons." Instead of doing
# this repeatedly while we build the model, let's create two handy functions
# to do it for us.
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
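
# A minimal usage sketch of the two helpers (the dense layer below is an
# illustration only and is not part of the network built further down):
# noisy weights and a 0.1 bias are created for any requested shape.
W_demo = weight_variable([16, 8])   # 16 inputs -> 8 hidden units
b_demo = bias_variable([8])
h_demo = tf.nn.relu(tf.matmul(x, W_demo) + b_demo)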

# https://www.tensorflow.org/versions/master/api_docs/python/nn.html#conv2d
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# https://www.tensorflow.org/versions/master/api_docs/python/nn.html#max_pool
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

W_conv1 = weight_variable([2, 2, 1, 32])
b_conv1 = bias_variable([32])

# treat the 16 features of each sample as a 4x4 single-channel "image"
x_image = tf.reshape(x, [-1, 4, 4, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([2, 2, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
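
# Shape check (follows directly from the 'SAME' padding and 2x2 pooling above):
#   x_image: [batch, 4, 4, 1]
#   h_conv1: [batch, 4, 4, 32]   h_pool1: [batch, 2, 2, 32]
#   h_conv2: [batch, 2, 2, 64]   h_pool2: [batch, 1, 1, 64]
# which is why the fully connected layer below expects 1*1*64 inputs.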

W_fc1 = weight_variable([1 * 1 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 1 * 1 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout on the fully connected layer
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# readout layer
W_fc2 = weight_variable([1024, 1])
b_fc2 = bias_variable([1])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# loss
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# accuracy
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# train
sess.run(tf.initialize_all_variables())
for i in range(20000):
    # cycle through the training set, one mini-batch per step
    n = i % num_batches
    batch_x = train_x[n * batch_size : (n + 1) * batch_size]
    batch_y = train_y[n * batch_size : (n + 1) * batch_size]
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})

# result
print("test accuracy %g" % accuracy.eval(feed_dict={
    x: test_x, y_: test_y, keep_prob: 1.0}))

number of samples 1250
elements_of_one_sample 16
train_x.shape= (1125, 16)
train_y.shape= (1125, 1)
test_x.shape= (125, 16)
test_y.shape= (125, 1)
############################## num_samples 1125
############################## num_batches 22
1.0
step 0, training accuracy 1
step 100, training accuracy 1
step 200, training accuracy 1
step 300, training accuracy 1
step 400, training accuracy 1
....
....
....
step 19500, training accuracy 1
step 19600, training accuracy 1
step 19700, training accuracy 1
step 19800, training accuracy 1
step 19900, training accuracy 1
test accuracy 1
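
Since the Temperature target is a continuous value, the single-column softmax/argmax setup above makes the reported accuracy trivially 1 no matter what the network learned. A minimal regression-style sketch under that observation, swapping in a linear readout with a mean-squared-error loss and reusing the placeholders and layers from the script above (names such as y_lin, mse and train_step_reg are illustrative, not from the script):

# linear readout instead of softmax, reusing h_fc1_drop, W_fc2, b_fc2 and keep_prob
y_lin = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
mse = tf.reduce_mean(tf.square(y_lin - y_))   # mean squared error loss
train_step_reg = tf.train.AdamOptimizer(1e-4).minimize(mse)

sess.run(tf.initialize_all_variables())
for i in range(2000):
    n = i % num_batches
    batch_x = train_x[n * batch_size : (n + 1) * batch_size]
    batch_y = train_y[n * batch_size : (n + 1) * batch_size]
    if i % 100 == 0:
        print("step %d, batch MSE %g" % (i, mse.eval(feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0})))
    train_step_reg.run(feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})

print("test MSE %g" % mse.eval(feed_dict={x: test_x, y_: test_y, keep_prob: 1.0}))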