Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- """
- Created on Tue Dec 10 11:08:24 2019
- @author: ShardeR
- """
- #Imports
- import pandas as pd
- import numpy as np
- from sklearn.preprocessing import MinMaxScaler
- from sklearn.neural_network import MLPRegressor
- from sklearn.model_selection import train_test_split
- from pyswarms.utils.plotters import plot_cost_history, plot_contour, plot_surface
- import matplotlib.pyplot as plt
- import pyswarms as ps
- import SwarmPackagePy as sp
- from SwarmPackagePy import testFunctions as tf
##
## Data preparation: next-day "Open" price prediction.
## Features at day t are paired with the Open price at day t+1, hence
## the one-step shift between the *_train and *_target arrays below.
df = pd.read_csv('aadr.us.txt', names=["Data","Open","High","Low","Close"], usecols=[0,1,2,3,4], parse_dates=True, index_col=0)
# Chronological split: shuffle=False keeps the time order intact, so
# t_* is the earlier 80% and val_* the later 20%.
t_train, val_train, t_target, val_target = train_test_split(df, df['Open'], test_size=0.2, shuffle=False)
# Shift targets one day forward relative to the features.
t_target = np.array(t_target[1:])
val_train = np.array(val_train[:len(val_train) - 1])
t_train = np.array(t_train[:len(t_train) - 1])
val_target = np.array(val_target[1:])
# Targets as column vectors, as MinMaxScaler expects 2-D input.
t_target = np.reshape(t_target, (len(t_target), 1))
val_target = np.reshape(val_target, (len(val_target), 1))
# BUG FIX: the original refit a single scaler via fit_transform on each
# of the four arrays in turn, which (a) leaks validation statistics into
# the preprocessing and (b) scales train and validation data with
# different min/max ranges.  Fit on the training data only and apply
# the same transform to the validation data; features and targets get
# separate scalers.
feature_scaler = MinMaxScaler(feature_range=(0, 1))
t_train = feature_scaler.fit_transform(t_train)
val_train = feature_scaler.transform(val_train)
target_scaler = MinMaxScaler(feature_range=(0, 1))
t_target = target_scaler.fit_transform(t_target)
val_target = target_scaler.transform(val_target)
# Keep the original module-level name available for any external user.
scaler = target_scaler
- ##
- #activation func --- tanh , identity , logistic , relu
- #solver ---- sgd , lbfgs , adam
def mlp_regress(hyperparamets):
    """Swarm-optimizer objective: train an MLP, return its loss.

    Each candidate position is a pair
    ``hyperparamets = [learning_rate_init, momentum]``.

    Trains on the module-level ``t_train`` / ``t_target`` arrays and
    returns ``1 - R^2`` measured on the training data (lower is better).
    The loss is also printed as a crude progress trace.

    NOTE(review): scoring on the training set rewards overfitting;
    consider scoring on held-out data instead.
    """
    model = MLPRegressor(activation='logistic', hidden_layer_sizes=(100,),
                         learning_rate_init=hyperparamets[0],
                         momentum=hyperparamets[1])
    # BUG FIX: t_target is an (n, 1) column vector; fit() expects a 1-D
    # target and would emit a DataConversionWarning otherwise.
    model.fit(t_train, np.ravel(t_target))
    score = model.score(t_train, t_target)
    loss = 1 - score
    print(loss)
    return loss
def mlp_regress2(best_sc):
    """Retrain an MLP with chosen hyperparameters and predict.

    ``best_sc = [learning_rate_init, momentum]`` (any extra entries are
    ignored).  Trains on the module-level ``t_train`` / ``t_target``
    arrays.

    Returns a tuple ``(score, result)``: ``score`` is R^2 on the
    training data, ``result`` the predictions for ``val_train``.
    """
    model = MLPRegressor(activation='logistic', hidden_layer_sizes=(100,),
                         learning_rate_init=best_sc[0],
                         momentum=best_sc[1])
    # BUG FIX: pass a 1-D target; the (n, 1) column vector would trigger
    # sklearn's DataConversionWarning.
    model.fit(t_train, np.ravel(t_target))
    result = model.predict(val_train)
    score = model.score(t_train, t_target)
    return score, result
def f(x):
    """PySwarms objective: evaluate every particle in the swarm.

    ``x`` is the (n_particles, 2) position matrix; returns a 1-D array
    holding ``mlp_regress``'s loss for each row.
    """
    losses = []
    for particle in x:
        losses.append(mlp_regress(particle))
    return np.array(losses)
- #create bounds for hyperparamets
- #min_bound_lr = 0.02
- #max_bound_lr = 0.1
- #boundslr = (min_bound_lr,max_bound_lr)
- # Initialize swarm
- #options = {'c1': 0.5 , 'c2': 0.3, 'w':0.9}
- # Call instance of PSO
- #bounds_lr = (np.array([0.01,0.01]),np.array([0.2,0.2]))
- #dimensions = ( 2 )
- #optimizer = ps.single.GlobalBestPSO(n_particles=20, dimensions=dimensions, options=options , bounds=bounds_lr)
- # Perform optimization
- #best_cost, pos = optimizer.optimize(f , iters=100)
- #swarm visualization
- #plot_cost_history(optimizer.cost_history)
- #plt.show()
def f1(x):
    # Objective wrapper handed to SwarmPackagePy's firework algorithm.
    # NOTE(review): `[range(n_particles)]` is a one-element list, so this
    # loop runs exactly once with `i = range(n_particles)`; `x[i]` is then
    # a fancy-indexed copy of the whole of `x`, and a single scalar loss
    # is returned.  If SwarmPackagePy calls this once per agent (x = one
    # 1-D position vector), that happens to yield the intended scalar
    # fitness; if it is called with the whole swarm at once, only one
    # combined evaluation happens.  Confirm SwarmPackagePy's calling
    # convention before "fixing" the brackets.
    n_particles = x.shape[0]
    for i in [range(n_particles)]:
        valor=x[i]
        j = mlp_regress(valor)
    return np.array(j)
##FireWork Algo
# Run SwarmPackagePy's firework algorithm with 20 agents to minimise f1
# over the 2-D hyperparameter space (learning_rate_init, momentum).
# NOTE(review): the positional args presumably map to (n, function,
# lower_bound, upper_bound, dimension, iterations) -- confirm against
# SwarmPackagePy's fwa signature.
alh = sp.fwa(20, f1, 0.1 , 0.8, 2, 5, m1=0.5 , m2=0.3)
# Best position found: [learning_rate_init, momentum].
best_hyp=alh.get_Gbest()
# NOTE(review): despite the label, this prints the best *position*
# (hyperparameters), not a score.
print ("best score :",best_hyp)
# Plot results
# Baseline: hand-picked hyperparameters for comparison.
# NOTE(review): [0.1,0,1] looks like a typo for [0.1, 0.1]; the third
# element is never read and momentum would be 0 here -- confirm intent.
rand_search = np.array([0.1,0,1])
score2, rand_res = mlp_regress2(rand_search)
sc ,res = mlp_regress2(best_hyp)
# Predictions of the tuned model (cyan) and the baseline (green) against
# the actual scaled validation targets (red).
plt.plot(res, color='c')
plt.plot(rand_res, color='g')
plt.plot(val_target, color='r')
plt.title("Modelo")
plt.ylabel("price normalized")
plt.xlabel("Dias")
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement