# IMDB sentiment classification in Keras: a flat Dense baseline vs. a small 1-D CNN.
import numpy as np
from matplotlib import pyplot as plt

from keras.datasets import imdb
from keras.models import Model
from keras.layers import (Flatten, Dense, Input, Conv1D, MaxPooling1D,
                          Dropout, GlobalAveragePooling1D, Embedding)
from keras.preprocessing import sequence

# Keep only the 5,000 most frequent words; everything rarer maps to the
# out-of-vocabulary index.
top_words = 5000

# Reviews arrive pre-tokenised as lists of word indices; labels are 0/1.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=top_words)

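# Optional helper (not in the original paste): decode an integer-encoded
# review back to words. The IMDB loader reserves indices 0-2 for
# padding/start-of-sequence/unknown, so real words are offset by 3.
def decode_review(encoded):
    word_index = imdb.get_word_index()
    index_to_word = {i + 3: w for w, i in word_index.items()}
    return ' '.join(index_to_word.get(i, '?') for i in encoded)

#print(decode_review(x_train[0]))
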
def data_analysis():
    print("Training data shape:")
    print(f"X_train: {x_train.shape}")
    print(f"Y_train: {y_train.shape}")

    print("Average review length:")
    lengths = [len(x) for x in x_train]
    print(f"Mean: {np.mean(lengths)} std: {np.std(lengths)}")
    plt.boxplot(lengths)
    plt.title(f"Mean: {np.mean(lengths)} std: {np.std(lengths)}")
    plt.show()


#data_analysis()

# Pad/truncate every review to exactly 500 tokens so the network sees
# fixed-length input.
max_words = 500
x_train = sequence.pad_sequences(x_train, maxlen=max_words)
x_test = sequence.pad_sequences(x_test, maxlen=max_words)

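# For reference (not in the original paste), pad_sequences pads and
# truncates at the front by default:
#   sequence.pad_sequences([[1, 2, 3]], maxlen=5)          -> [[0, 0, 1, 2, 3]]
#   sequence.pad_sequences([[1, 2, 3, 4, 5, 6]], maxlen=5) -> [[2, 3, 4, 5, 6]]
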
def test_dense():
    # Baseline: embed each word, flatten the whole review, and classify
    # with a single fully connected hidden layer.
    inputs = Input((max_words,))
    h = Embedding(top_words, 128, input_length=max_words)(inputs)
    h = Flatten()(h)
    h = Dense(256, activation='relu')(h)
    outputs = Dense(1, activation='sigmoid')(h)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                        epochs=20, batch_size=128, verbose=2)
    scores = model.evaluate(x_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))

    # Plot training & validation accuracy values.
    # Note: Keras >= 2.3 logs these under 'accuracy'/'val_accuracy'
    # rather than 'acc'/'val_acc'.
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

    # Plot training & validation loss values.
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()


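# Rough parameter count for test_dense (not in the original paste):
#   Embedding: 5000 * 128                        =    640,000
#   Dense(256) after Flatten: 500*128*256 + 256  = 16,384,256
#   Output Dense(1): 256 + 1                     =        257
# ~17M parameters in total, almost all in the first Dense layer, which
# makes this baseline prone to overfitting.
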
def test_cnn():
    # Two small 1-D convolutions over the embedded sequence, then global
    # average pooling so the classifier is independent of word position.
    inputs = Input((max_words,))
    h = Embedding(top_words, 128, input_length=max_words)(inputs)
    h = Conv1D(filters=8, kernel_size=3, padding='same', activation='relu')(h)
    h = MaxPooling1D(pool_size=2)(h)
    h = Conv1D(filters=16, kernel_size=3, padding='same', activation='relu')(h)
    h = GlobalAveragePooling1D()(h)
    h = Dropout(0.5)(h)
    outputs = Dense(1, activation='sigmoid')(h)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                        epochs=20, batch_size=128)
    scores = model.evaluate(x_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))

    # Plot training & validation accuracy values
    # (see the 'acc' vs 'accuracy' note in test_dense).
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()


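# Rough parameter count for test_cnn (not in the original paste):
#   Embedding: 5000 * 128           = 640,000
#   Conv1D(8, 3):  3*128*8 + 8      =   3,080
#   Conv1D(16, 3): 3*8*16 + 16      =     400
#   Output Dense(1): 16 + 1         =      17
# ~643K parameters; outside the shared embedding, the CNN is tiny compared
# with the ~16M-parameter Dense layer above.
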
#test_dense()
test_cnn()
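
# Optional refactor sketch (not in the original paste): the accuracy/loss
# plotting code is duplicated in test_dense and test_cnn and could be
# factored out like this:
def plot_history(history, metric='acc'):
    for key, ylabel in ((metric, 'Accuracy'), ('loss', 'Loss')):
        plt.plot(history.history[key])
        plt.plot(history.history['val_' + key])
        plt.title('Model ' + ylabel.lower())
        plt.ylabel(ylabel)
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()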