Advertisement
mikolajmki

si_lab05

Nov 10th, 2022
186
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 2.92 KB | None | 0 0
  1. import numpy as np
  2. from matplotlib import pyplot as plt
  3. from matplotlib import rcParams
  4. import pandas as pd
  5. # rcParams['font.family'] = 'Times New Roman'
  6. # rcParams['font.size'] = 16
  7. # np.set_printoptions(precision=2)
  8. x = np.arange(0,1,0.01)
  9. y = x.copy()
  10. X,Y = np.meshgrid(x,y)
  11. wx = 0.1
  12. wy = 0.3
  13. S = wx*X+wy*Y
  14. out = S>0.15
  15. fig, ax = plt.subplots(1,1)
  16. ax.imshow(out)
  17. ticks = np.around(np.arange(-0.2,1.1,0.2), 3)
  18. ax.set_xticklabels(ticks)
  19. ax.set_yticklabels(ticks)
  20. plt.gca().invert_yaxis()
  21.  
  22. from sklearn.datasets import load_iris
  23. data = load_iris()
  24. y = data.target
  25. X = data.data
  26. y = pd.Categorical(y)
  27. y = pd.get_dummies(y).values
  28. class_num = y.shape[1]
  29.  
  30. from keras.models import Sequential
  31. from keras.layers import Input, Dense
  32. from keras.optimizers import Adam, RMSprop, SGD
  33. from keras.utils import plot_model
  34. class_num = 3
  35. model = Sequential()
  36. model.add(Dense(64, input_shape = (X.shape[1],), activation = 'relu'))
  37. # model.add(Dense(64, activation = 'relu'))
  38. # model.add(Dense(64, activation = 'relu'))
  39. model.add(Dense(class_num, activation = 'softmax'))
  40. learning_rate = 0.001
  41. model.compile(optimizer= Adam(learning_rate), loss='categorical_crossentropy', metrics=('accuracy'))
  42. model.summary()
  43. plot_model(model,to_file="my_model.png")
  44.  
  45. from sklearn.preprocessing import StandardScaler
  46. from sklearn.model_selection import *
  47. X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)
  48. scaler = StandardScaler()
  49. X_train = scaler.fit_transform(X_train)
  50. X_test = scaler.transform(X_test)
  51. model.fit(X_train, y_train, batch_size=32, epochs=100, validation_data=(X_test, y_test), verbose=2)
  52.  
  53. from matplotlib import pyplot as plt
  54. historia = model.history.history
  55. floss_train = historia['loss']
  56. floss_test = historia['val_loss']
  57. acc_train = historia['accuracy']
  58. acc_test = historia['val_accuracy']
  59. fig,ax = plt.subplots(1,2, figsize=(20,10))
  60. epochs = np.arange(0, 100)
  61. ax[0].plot(epochs, floss_train, label = 'floss_train')
  62. ax[0].plot(epochs, floss_test, label = 'floss_test')
  63. ax[0].set_title('Funkcje strat')
  64. ax[0].legend()
  65. ax[1].set_title('Dokladnosci')
  66. ax[1].plot(epochs, acc_train, label = 'acc_train')
  67. ax[1].plot(epochs, acc_test, label = 'acc_test')
  68. ax[1].legend()
  69.  
  70.  
  71. # from sklearn.model_selection import KFold
  72. # from sklearn.metrics import accuracy_score
  73. # X_train, X_test, y_train, y_test = train_test_split(X,y,
  74. #  test_size=0.2)
  75. # accs = []
  76. # scaler = StandardScaler()
  77. # for train_index, test_index in KFold(5).split(X_train):
  78. #     X_train_cv = X_train[train_index,:]
  79. #     X_test_cv = X_train[test_index,:]
  80. #     y_train_cv = y_train[train_index,:]
  81. #     y_test_cv = y_train[test_index,:]
  82. #     X_train_cv = scaler.fit_transform(X_train_cv)
  83. #     X_test_cv = scaler.transform(X_test_cv)
  84. #     model.fit(X_train_cv, y_train_cv, batch_size=32,
  85. #               epochs=100, validation_data=(X_test_cv, y_test_cv),
  86. #               verbose=2)
  87. #     y_pred = model.predict(X_test_cv).argmax(axis=1)
  88. #     y_test_cv = y_test_cv.argmax(axis=1)
  89.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement