Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import pandas as pd
from sklearn.datasets import load_iris

# Load the iris data set and one-hot encode the integer class labels so the
# targets match a softmax/categorical-crossentropy head.
iris = load_iris()
X = iris.data
y = pd.get_dummies(pd.Categorical(iris.target)).values
class_num = y.shape[1]  # number of distinct classes (3 for iris)
- from sklearn.model_selection import KFold
- from sklearn.metrics import accuracy_score
- from sklearn.model_selection import train_test_split
- from sklearn.preprocessing import StandardScaler
- from keras.models import Sequential
- from keras.layers import Input, Dense
- from keras.optimizers import Adam, RMSprop, SGD
- from keras.utils import plot_model
# Baseline MLP: three 64-unit ReLU hidden layers, softmax over class_num classes.
model = Sequential()
model.add(Dense(64, input_shape=(X.shape[1],), activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(class_num, activation='softmax'))

learning_rate = 0.0001
# BUG FIX: metrics=('accuracy') is NOT a tuple — parentheses without a comma
# leave it as the bare string 'accuracy'. Pass a list, as Keras expects.
model.compile(optimizer=Adam(learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
# Hold out a final test set, then run 5-fold cross-validation on the
# remaining training portion.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

res = []
accs = []
scaler = StandardScaler()
# Snapshot the freshly initialised weights so every fold starts from the same
# untrained state instead of continuing training from the previous fold.
init_weights = model.get_weights()
for train_index, val_index in KFold(5).split(X_train):
    X_train_cv = X_train[train_index, :]
    X_val_cv = X_train[val_index, :]
    y_train_cv = y_train[train_index, :]
    y_val_cv = y_train[val_index, :]
    # Fit the scaler on the training fold only, to avoid leaking validation
    # statistics into training.
    X_train_cv = scaler.fit_transform(X_train_cv)
    X_val_cv = scaler.transform(X_val_cv)
    model.set_weights(init_weights)
    # BUG FIX: the original called model.fit(X_train, y_train, ...,
    # validation_data=(X_test, y_test)) inside the loop — it trained on the
    # full, unscaled data every fold and never used the fold splits it had
    # just prepared. Train on the scaled fold data instead.
    model.fit(X_train_cv, y_train_cv, batch_size=32,
              epochs=100, validation_data=(X_val_cv, y_val_cv),
              verbose=2)
- from sklearn.metrics import confusion_matrix
- from sklearn.preprocessing import StandardScaler
- from sklearn.metrics import f1_score
- import numpy as np
- # X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2)
- # scaler = StandardScaler()
- # X_train = scaler.fit_transform(X_train)
- # X_test = scaler.transform(X_test)
- # model.fit(X_train, y_train, batch_size=32, epochs=100,
- # validation_data=(X_test, y_test), verbose=2)
- # fold_count = 5
- # weights = model.get_weights()
- # for train_index, val_index in KFold(fold_count).split(X_train):
- # X_train_cv = X_train[train_index, :]
- # x_val_cv = X_train[val_index, :]
- # y_train_cv = y_train[train_index, :]
- # y_val_cv = y_train[val_index, :]
- # model.set_weights(weights)
- # # model = train_function(model, X_train_cv, y_train_cv, x_val_cv, y_val_cv, verbose=2)
- # model.fit(X_train, y_train, batch_size=32, epochs=100, validation_data=(X_test, y_test), verbose=2)
- # his_acc = model.history.history['accuracy']
- # his_val_acc = model.history.history['val_accuracy']
- # his_val_los = model.history.history['val_loss']
- # res.append([np.max(his_acc), np.argmax(his_acc), np.max(his_val_acc), np.argmax(his_val_acc),
- # np.min(his_val_los), np.argmin(his_val_los)])
- # y_pred_cv = model.predict(x_val_cv)
- # y_pred_cv[y_pred_cv >= 0.5] = 1
- # y_pred_cv[y_pred_cv < 0.5] = 0
- # print(">>>>>> 1st class <<<<<<")
- # print(confusion_matrix(y_val_cv[:, 0], y_pred_cv[:, 0]))
- # print(accuracy_score(y_val_cv[:, 0], y_pred_cv[:, 0]))
- # print(f1_score(y_val_cv[:, 0], y_pred_cv[:, 0]))
- # print(">>>>>> 2nd class <<<<<<")
- # print(confusion_matrix(y_val_cv[:, 1], y_pred_cv[:, 1]))
- # print(accuracy_score(y_val_cv[:, 1], y_pred_cv[:, 1]))
- # print(f1_score(y_val_cv[:, 1], y_pred_cv[:, 1]))
- # print(">>>>>> 3rd class <<<<<<")
- # print(confusion_matrix(y_val_cv[:, 2], y_pred_cv[:, 2]))
- # print(accuracy_score(y_val_cv[:, 2], y_pred_cv[:, 2]))
- # print(f1_score(y_val_cv[:, 2], y_pred_cv[:, 2]))
- # accs.append(accuracy_score(y_val_cv, y_pred_cv))
- # print("res: ", res)
- # print("acc: ", accs)
- # print("accs mean: ", np.mean(accs))
from keras.regularizers import l2, l1

neuron_num = 64
model = Sequential()
# BUG FIX: the original wrote `kernel_regularizers = 12(0.01)` — the digit
# twelve instead of the imported `l2` (calling the int 12 raises TypeError),
# and the Keras keyword is `kernel_regularizer` (singular).
model.add(Dense(neuron_num, activation='relu', input_shape=(X.shape[1],),
                kernel_regularizer=l2(0.01)))
from keras.layers import Dense, BatchNormalization
from keras.layers import Dropout, GaussianNoise
from keras.layers import LayerNormalization
from keras.models import Sequential
from keras.optimizers import Adam

neuron_num = 64
do_rate = 0.5         # dropout rate
noise = 0.1           # stddev of the GaussianNoise layer
learning_rate = 0.001

# A repeating block of layer *classes*; each one is instantiated in the loop
# below via layer(*arg) with the matching tuple from `args`.
# BUG FIX: LayerNormalization was listed as an instance (LayerNormalization())
# while the other entries were classes — calling that instance with *arg in
# the loop would fail. Store the class, like the others.
block = [
    Dense,
    LayerNormalization,
    BatchNormalization,
    Dropout,
    GaussianNoise]
args = [(neuron_num, 'selu'), (), (), (do_rate,), (noise,)]

model = Sequential()
# BUG FIX: the original keyword was `put_shape`, which Keras rejects;
# the intended keyword is `input_shape`.
model.add(Dense(neuron_num, activation='relu',
                input_shape=(X.shape[1],)))
repeat_num = 2
for i in range(repeat_num):
    for layer, arg in zip(block, args):
        model.add(layer(*arg))

# NOTE(review): a single sigmoid unit with binary_crossentropy implies a
# binary task, but `y` above is one-hot with 3 classes — confirm intent.
model.add(Dense(1, activation='sigmoid'))
# Metrics passed as a list (Keras convention) instead of a tuple.
model.compile(optimizer=Adam(learning_rate),
              loss='binary_crossentropy',
              metrics=['accuracy', 'Recall', 'Precision'])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement