Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import rcParams
import pandas as pd

# rcParams['font.family'] = 'Times New Roman'
# rcParams['font.size'] = 16
# np.set_printoptions(precision=2)

# Visualise the decision boundary of a single linear unit over the unit
# square: classify each grid point by whether wx*x + wy*y exceeds 0.15.
x = np.arange(0, 1, 0.01)
y = x.copy()
X, Y = np.meshgrid(x, y)

wx = 0.1  # weight on the x input
wy = 0.3  # weight on the y input
S = wx * X + wy * Y   # weighted sum at every grid point
out = S > 0.15        # boolean decision map (True above the boundary)

fig, ax = plt.subplots(1, 1)
# BUG FIX: the original called set_xticklabels/set_yticklabels with
# labels -0.2 ... 1.0 without first fixing the tick *positions*, so the
# labels were silently misaligned with imshow's pixel-based default
# ticks.  Passing `extent` maps pixels to data coordinates directly,
# and origin='lower' puts y increasing upward, which also makes the
# plt.gca().invert_yaxis() hack unnecessary.
ax.imshow(out, extent=(0.0, 1.0, 0.0, 1.0), origin='lower')
from sklearn.datasets import load_iris

# Load the iris data set and one-hot encode its integer class labels.
data = load_iris()
X = data.data
# pd.Categorical -> get_dummies yields an (n_samples, n_classes)
# indicator matrix; .values strips the DataFrame wrapper.
y = pd.get_dummies(pd.Categorical(data.target)).values
class_num = y.shape[1]  # number of distinct classes
from keras.models import Sequential
from keras.layers import Input, Dense
from keras.optimizers import Adam, RMSprop, SGD
from keras.utils import plot_model

# Fully-connected classifier: input features -> 64 ReLU -> softmax.
# NOTE: `class_num` is already derived from the one-hot targets above
# (y.shape[1]); the original re-assigned `class_num = 3` here, which
# would silently break on any data set with a different class count.
# The hard-coded override has been removed.
model = Sequential()
model.add(Dense(64, input_shape=(X.shape[1],), activation='relu'))
# model.add(Dense(64, activation='relu'))
# model.add(Dense(64, activation='relu'))
model.add(Dense(class_num, activation='softmax'))

learning_rate = 0.001
# BUG FIX: metrics=('accuracy') is just the string 'accuracy' — the
# parentheses do not create a tuple without a trailing comma.  Keras
# expects a *list* of metric names.
model.compile(optimizer=Adam(learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
plot_model(model, to_file="my_model.png")
from sklearn.preprocessing import StandardScaler
# FIX: replaced `from sklearn.model_selection import *` — wildcard
# imports pollute the namespace and hide where names come from.
from sklearn.model_selection import train_test_split

# Hold out 20% of the samples for validation.
# NOTE(review): no random_state is passed, so the split (and therefore
# the reported metrics) differ between runs — set one for
# reproducibility if needed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Standardise features using statistics of the training split only,
# then apply the same transform to the held-out data (no leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

model.fit(X_train, y_train,
          batch_size=32,
          epochs=100,
          validation_data=(X_test, y_test),
          verbose=2)
from matplotlib import pyplot as plt

# Plot the loss and accuracy curves recorded by the last fit() call.
historia = model.history.history
floss_train = historia['loss']
floss_test = historia['val_loss']
acc_train = historia['accuracy']
acc_test = historia['val_accuracy']

fig, ax = plt.subplots(1, 2, figsize=(20, 10))
# FIX / generalisation: the epoch axis was hard-coded to
# np.arange(0, 100); deriving it from the recorded history keeps the
# plot correct if the epoch count ever changes (e.g. early stopping).
epochs = np.arange(len(floss_train))

ax[0].plot(epochs, floss_train, label='floss_train')
ax[0].plot(epochs, floss_test, label='floss_test')
ax[0].set_title('Funkcje strat')  # "Loss functions" (Polish)
ax[0].legend()

ax[1].set_title('Dokladnosci')    # "Accuracies" (Polish)
ax[1].plot(epochs, acc_train, label='acc_train')
ax[1].plot(epochs, acc_test, label='acc_test')
ax[1].legend()
- # from sklearn.model_selection import KFold
- # from sklearn.metrics import accuracy_score
- # X_train, X_test, y_train, y_test = train_test_split(X,y,
- # test_size=0.2)
- # accs = []
- # scaler = StandardScaler()
- # for train_index, test_index in KFold(5).split(X_train):
- # X_train_cv = X_train[train_index,:]
- # X_test_cv = X_train[test_index,:]
- # y_train_cv = y_train[train_index,:]
- # y_test_cv = y_train[test_index,:]
- # X_train_cv = scaler.fit_transform(X_train_cv)
- # X_test_cv = scaler.transform(X_test_cv)
- # model.fit(X_train_cv, y_train_cv, batch_size=32,
- # epochs=100, validation_data=
- # (X_test_cv,y_test_cv), verbose=2)
- # y_pred = model.predict(X_test_cv).argmax(axis=1)
- # y_test_cv = y_test_cv.argmax(axis=1)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement