SHARE
TWEET

Untitled

a guest Jun 27th, 2019 80 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. #Convolutional Neural Network
  2.  
  3. # Importing the Keras libraries and packages
  4.     from keras.models import Sequential
  5.     from keras.layers import Convolution2D
  6.     from keras.layers import MaxPooling2D
  7.     from keras.layers import Flatten
  8.     from keras.layers import Dense
  9.     from keras.models import model_from_json
  10.     import os
  11. #initialize the cnn
  12.     classifier = Sequential()
  13.  
  14. #Step 1 convolution
  15.     classifier.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))
  16.  
  17. #Step 2 Pooling
  18.     classifier.add(MaxPooling2D(pool_size = (2,2)))
  19.  
  20.  
  21. #Step 3 Flattening
  22.     classifier.add(Flatten())
  23.  
  24. #Step 4 Full Connection
  25.     classifier.add(Dense(output_dim = 128, activation = 'relu'))
  26.     classifier.add(Dense(output_dim = 64, activation = 'relu'))
  27.     classifier.add(Dense(output_dim = 32, activation = 'relu'))
  28.     classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
  29.  
  30. #Compiling the CNN
  31.     classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
  32.  
  33. #Part 2 Fitting the CNN to the images
  34.     from keras.preprocessing.image import ImageDataGenerator
  35.  
  36.     train_datagen = ImageDataGenerator(
  37.         rescale=1/.255,
  38.         shear_range=0.2,
  39.         zoom_range=0.2,
  40.         horizontal_flip=True)
  41.  
  42.     test_datagen = ImageDataGenerator(rescale=1./255)
  43.  
  44.     training_set = train_datagen.flow_from_directory(
  45.         'dataset/training_set',
  46.         target_size=(64, 64),
  47.         batch_size=32,
  48.         class_mode='binary')
  49.  
  50.     test_set = test_datagen.flow_from_directory(
  51.         'dataset/test_set',
  52.         target_size=(64, 64),
  53.         batch_size=32,
  54.         class_mode='binary')
  55.  
  56.     from IPython.display import display
  57.     from PIL import Image
  58.  
  59.     classifier.fit_generator(
  60.         training_set,
  61.         steps_per_epoch=1589,
  62.         epochs=10,
  63.         validation_data=test_set,
  64.         validation_steps=378)
  65.  
  66.     import numpy as np
  67.     from keras.preprocessing import image
  68.     test_image = image.load_img('dataset/test_set/cats/cat.4012.jpg', target_size = (64,64))
  69.     test_image = image.img_to_array(test_image)
  70.     test_image = np.expand_dims(test_image, axis = 0)
  71.     result = classifier.predict(test_image)
  72.     training_set.class_indices
  73.     if result[0][0] >= 0.5:
  74.         prediction = 'dog'
  75.     else:
  76.         prediction = 'cat'
  77.     print(prediction)
  78.  
  79. //examples from deep-learning with python
  80.     from keras.datasets import imdb
  81.  
  82.     (train_data, train_labels), (test_data, test_labels) =
  83.     imdb.load_data(num_words = 10000)
  84.     import numpy as np
  85.  
  86.     def vectorize_sequences(sequences, dimension=10000):
  87.         results = np.zeros((len(sequences), dimension))
  88.         for i,sequence in enumerate(sequences):
  89.             results[i, sequence]=1.
  90.         return results
  91.     x_train = vectorize_sequences(train_data)
  92.     x_test =  vectorize_sequences(test_data)
  93.  
  94.     from keras import models
  95.     from keras import layers
  96.  
  97.     model = models.Sequential()
  98.     model.add(layers.Dense(16, activation='relu',input_shape=(10000,)))
  99.     model.add(layers.Dense(16, activation='relu'))
  100.     model.add(layers.Dense(1, activation='sigmoid'))
  101.     x_val = x_train[:10000]
  102.     partial_x_train = x_train[10000:]
  103.     y_val = y_train[:10000]
  104.     partial_y_train = y_train[10000:]
  105.     model.compile(optimizer='rmsprop',
  106.              loss='binary_crossentropy',
  107.              metrics=['acc'])
  108.     history = model.fit(partial_x_train,
  109.                    partial_y_train,
  110.                     epochs=20,
  111.                    batch_size=512,
  112.                    validation_data=(x_val,y_val))
  113.  
  114.  
dogs and cats output:
  116.  
  117.  
  118. deeplearning imdb example output:
WARNING:tensorflow:From C:\Users\Mike\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
  120. Instructions for updating:
  121. Use tf.cast instead.
  122. Train on 15000 samples, validate on 10000 samples
  123. Epoch 1/20
  124. 15000/15000 [==============================] - 4s 246us/step - loss: 0.6932 - acc: 0.4982 - val_loss: 0.6932 - val_acc: 0.4947
  125. Epoch 2/20
  126. 15000/15000 [==============================] - 2s 115us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  127. Epoch 3/20
  128. 15000/15000 [==============================] - 2s 115us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  129. Epoch 4/20
  130. 15000/15000 [==============================] - 2s 119us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  131. Epoch 5/20
  132. 15000/15000 [==============================] - 2s 120us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  133. Epoch 6/20
  134. 15000/15000 [==============================] - 2s 119us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6933 - val_acc: 0.4947
  135. Epoch 7/20
  136. 15000/15000 [==============================] - 2s 113us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  137. Epoch 8/20
  138. 15000/15000 [==============================] - 2s 113us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  139. Epoch 9/20
  140. 15000/15000 [==============================] - 2s 119us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6933 - val_acc: 0.4947
  141. Epoch 10/20
  142. 15000/15000 [==============================] - 2s 122us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6933 - val_acc: 0.4947
  143. Epoch 11/20
  144. 15000/15000 [==============================] - 2s 116us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6933 - val_acc: 0.4947
  145. Epoch 12/20
  146. 15000/15000 [==============================] - 2s 116us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6933 - val_acc: 0.4947
  147. Epoch 13/20
  148. 15000/15000 [==============================] - 2s 121us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6933 - val_acc: 0.4947
  149. Epoch 14/20
  150. 15000/15000 [==============================] - 2s 127us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  151. Epoch 15/20
  152. 15000/15000 [==============================] - 2s 121us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  153. Epoch 16/20
  154. 15000/15000 [==============================] - 2s 113us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  155. Epoch 17/20
  156. 15000/15000 [==============================] - 2s 115us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  157. Epoch 18/20
  158. 15000/15000 [==============================] - 2s 114us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  159. Epoch 19/20
  160. 15000/15000 [==============================] - 2s 114us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
  161. Epoch 20/20
  162. 15000/15000 [==============================] - 2s 119us/step - loss: 0.6931 - acc: 0.5035 - val_loss: 0.6932 - val_acc: 0.4947
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
Not a member of Pastebin yet?
Sign Up, it unlocks many cool features!
 
Top