Advertisement
mikolajmki

si_lab06

Nov 17th, 2022
710
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 4.63 KB | None | 0 0
  1. import pandas as pd
  2. from sklearn.datasets import load_iris
  3. data = load_iris()
  4. y = data.target
  5. X = data.data
  6. y = pd.Categorical(y)
  7. y = pd.get_dummies(y).values
  8. class_num = y.shape[1]
  9. from sklearn.model_selection import KFold
  10. from sklearn.metrics import accuracy_score
  11. from sklearn.model_selection import train_test_split
  12. from sklearn.preprocessing import StandardScaler
  13.  
  14. from keras.models import Sequential
  15. from keras.layers import Input, Dense
  16. from keras.optimizers import Adam, RMSprop, SGD
  17. from keras.utils import plot_model
  18. model = Sequential()
  19. model.add(Dense(64, input_shape = (X.shape[1],), activation = 'relu'))
  20. model.add(Dense(64, activation = 'relu'))
  21. model.add(Dense(64, activation = 'relu'))
  22. model.add(Dense(class_num, activation = 'softmax'))
  23. learning_rate = 0.0001
  24. model.compile(optimizer= Adam(learning_rate),
  25.  loss='categorical_crossentropy',
  26. metrics=('accuracy'))
  27. model.summary()
  28.  
  29. X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)
  30. res = []
  31. accs = []
  32. scaler = StandardScaler()
  33. for train_index, test_index in KFold(5).split(X_train):
  34.  X_train_cv = X_train[train_index,:]
  35.  X_test_cv = X_train[test_index,:]
  36.  y_train_cv = y_train[train_index,:]
  37.  y_test_cv = y_train[test_index,:]
  38.  X_train_cv = scaler.fit_transform(X_train_cv)
  39.  X_test_cv = scaler.transform(X_test_cv)
  40.  model.fit(X_train, y_train, batch_size=32,
  41.  epochs=100, validation_data=(X_test, y_test),
  42.  verbose=2)
  43.  
  44. from sklearn.metrics import confusion_matrix
  45. from sklearn.preprocessing import StandardScaler
  46. from sklearn.metrics import f1_score
  47. import numpy as np
  48. # X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)
  49. # scaler = StandardScaler()
  50. # X_train = scaler.fit_transform(X_train)
  51. # X_test = scaler.transform(X_test)
  52. # model.fit(X_train, y_train, batch_size=32, epochs=100,
  53. #  validation_data=(X_test, y_test), verbose=2)
  54. # fold_count = 5
  55.  
  56. # weights = model.get_weights()
  57. # for train_index, val_index in KFold(fold_count).split(X_train):
  58. #     X_train_cv = X_train[train_index, :]
  59. #     x_val_cv = X_train[val_index, :]
  60. #     y_train_cv = y_train[train_index, :]
  61. #     y_val_cv = y_train[val_index, :]
  62. #     model.set_weights(weights)
  63. #     # model = train_function(model, X_train_cv, y_train_cv, x_val_cv, y_val_cv, verbose=2)
  64. #     model.fit(X_train, y_train, batch_size=32, epochs=100, validation_data=(X_test, y_test), verbose=2)
  65.    
  66.     # his_acc = model.history.history['accuracy']
  67.     # his_val_acc = model.history.history['val_accuracy']
  68.     # his_val_los = model.history.history['val_loss']
  69.     # res.append([np.max(his_acc), np.argmax(his_acc), np.max(his_val_acc), np.argmax(his_val_acc),
  70.     #             np.min(his_val_los), np.argmin(his_val_los)])
  71.    
  72.     # y_pred_cv = model.predict(x_val_cv)
  73.    
  74.     # y_pred_cv[y_pred_cv >= 0.5] = 1
  75.     # y_pred_cv[y_pred_cv < 0.5] = 0
  76.    
  77.     # print(">>>>>> 1st class <<<<<<")
  78.     # print(confusion_matrix(y_val_cv[:, 0], y_pred_cv[:, 0]))
  79.     # print(accuracy_score(y_val_cv[:, 0], y_pred_cv[:, 0]))
  80.     # print(f1_score(y_val_cv[:, 0], y_pred_cv[:, 0]))
  81.    
  82.     # print(">>>>>> 2nd class <<<<<<")
  83.     # print(confusion_matrix(y_val_cv[:, 1], y_pred_cv[:, 1]))
  84.     # print(accuracy_score(y_val_cv[:, 1], y_pred_cv[:, 1]))
  85.     # print(f1_score(y_val_cv[:, 1], y_pred_cv[:, 1]))
  86.    
  87.     # print(">>>>>> 3rd class <<<<<<")
  88.     # print(confusion_matrix(y_val_cv[:, 2], y_pred_cv[:, 2]))
  89.     # print(accuracy_score(y_val_cv[:, 2], y_pred_cv[:, 2]))
  90.     # print(f1_score(y_val_cv[:, 2], y_pred_cv[:, 2]))
  91.    
  92.     # accs.append(accuracy_score(y_val_cv, y_pred_cv))
  93.    
  94.     # print("res: ", res)
  95.     # print("acc: ", accs)
  96.     # print("accs mean: ", np.mean(accs))
  97.  
  98. from keras.regularizers import l2, l1
  99. neuron_num = 64
  100. model = Sequential()
  101. model.add(Dense(neuron_num, activation='relu', input_shape=(X.shape[1], ), kernel_regularizers = 12(0.01)))
  102.  
  103. from keras.layers import Dense, BatchNormalization
  104. from keras.layers import Dropout, GaussianNoise
  105. from keras.layers import LayerNormalization
  106. from keras.models import Sequential
  107. from keras.optimizers import Adam
  108. neuron_num = 64
  109. do_rate = 0.5
  110. noise = 0.1
  111. learning_rate = 0.001
  112. block = [
  113. Dense,
  114. LayerNormalization(),
  115. BatchNormalization,
  116. Dropout,
  117. GaussianNoise]
  118. args = [(neuron_num,'selu'),(),(),(do_rate,),(noise,)]
  119. model = Sequential()
  120. model.add(Dense(neuron_num, activation='relu',
  121. put_shape = (X.shape[1],)))
  122. repeat_num = 2
  123. for i in range(repeat_num):
  124.   for layer,arg in zip(block, args):
  125.     model.add(layer(*arg))
  126.   model.add(Dense(1, activation='sigmoid'))
  127.   model.compile(optimizer= Adam(learning_rate),
  128.   loss='binary_crossentropy',  metrics=('accuracy', 'Recall', 'Precision'))
  129.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement