Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import os
import ipdb  # debugger; drop a breakpoint with ipdb.set_trace()
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from keras.models import Sequential
from keras.layers import Dense

# Load the Iris dataset (numeric features, integer class targets).
iris = datasets.load_iris()
features = iris['data']
targets = iris['target']

# One-hot encode the integer labels so they match a softmax output layer.
# OneHotEncoder wants a 2-D input, hence the added axis; fit_transform
# returns a sparse matrix, densified with toarray().
encoder = OneHotEncoder()
Y = encoder.fit_transform(targets[:, np.newaxis]).toarray()

# Scale every feature into [0, 1] before feeding the network.
X = MinMaxScaler().fit_transform(features)

# Hold out 30% for testing; fixed seed keeps the split reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)

num_features = X.shape[1]
num_classes = Y.shape[1]
batch_size = 1
epochs = 100
# Small feed-forward classifier: inputs -> 5 tanh hidden units -> softmax.
model = Sequential()
model.add(Dense(5, input_dim=num_features, activation='tanh'))
model.add(Dense(num_classes, activation='softmax'))
model.summary()

# Categorical cross-entropy is the natural loss for one-hot targets
# paired with a softmax output.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train; batch_size/epochs come from the hyperparameters defined above.
history = model.fit(
    X_train,
    Y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
)
# Measure performance on both splits; Keras evaluate() returns
# [loss, metric...] in the order given to compile(), so index 0 is the
# loss and index 1 the accuracy.
score_train = model.evaluate(X_train, Y_train, verbose=0)
score_test = model.evaluate(X_test, Y_test, verbose=0)

print('Train loss:', score_train[0])
print('Train accuracy:', score_train[1])
print('Test loss:', score_test[0])
print('Test accuracy:', score_test[1])
# Convert the trained Keras network to portable C with emlearn and emit a
# single header file for embedding on a microcontroller.
import emlearn

# method='loadable' generates code with the weights loadable at runtime;
# the alternative 'pymodule' builds a Python extension instead.
cmodel = emlearn.net.convert_keras(model, method='loadable')
cmodel.save(file='iris_keras.h', name='iris')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement