Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- """
- Created on Tue Dec 10 11:08:24 2019
- @author: ShardeR
- """
- import pandas as pd
- import numpy as np
- from sklearn.preprocessing import MinMaxScaler
- #from keras.models import Sequential
- from sklearn.neural_network import MLPRegressor
- from sklearn.model_selection import train_test_split
- from pyswarms.utils.plotters import plot_cost_history, plot_contour, plot_surface
- #from keras.layers import Dense, Dropout, LSTM
- import matplotlib.pyplot as plt
- import pyswarms as ps
- import SwarmPackagePy as sp
- from SwarmPackagePy import testFunctions as tf
# ----- Data loading & preprocessing -----
# Load OHLC price history; column 0 ("Data") becomes a parsed DatetimeIndex.
df = pd.read_csv(
    'aadr.us.txt',
    names=["Data", "Open", "High", "Low", "Close"],
    usecols=[0, 1, 2, 3, 4],
    parse_dates=True,
    index_col=0,
)

# Chronological 80/20 split (shuffle=False preserves time order).
# Targets are the *next* day's Open: drop the last feature row and the first
# target row so features at day t are paired with the open at day t+1.
t_train, val_train, t_target, val_target = train_test_split(
    df, df['Open'], test_size=0.2, shuffle=False
)
t_target = np.array(t_target[1:])
val_train = np.array(val_train[0:(len(val_train) - 1)])
t_train = np.array(t_train[0:(len(t_train) - 1)])
val_target = np.array(val_target[1:])
t_target = np.reshape(t_target, (len(t_target), 1))
val_target = np.reshape(val_target, (len(val_target), 1))

# BUG FIX: the original re-ran `scaler.fit_transform` on every array — one
# scaler object refitted on features *and* targets, train *and* validation.
# That leaks validation statistics into scaling and scales the train and
# validation splits with different min/max ranges.  Fit two scalers on the
# training split only, then only `transform` the validation split.
feature_scaler = MinMaxScaler(feature_range=(0, 1))
target_scaler = MinMaxScaler(feature_range=(0, 1))
t_train = feature_scaler.fit_transform(t_train)
val_train = feature_scaler.transform(val_train)
t_target = target_scaler.fit_transform(t_target)
val_target = target_scaler.transform(val_target)
# Keep the original module-level name `scaler` for any downstream use
# (bound to the target scaler, the last one the original code fitted).
scaler = target_scaler
#activation func --- tanh , identity , logistic , relu
#solver ---- sgd , lbfgs , adam
def mlp_regress(hyperparamets):
    """Train an MLP regressor with a given learning rate and return its loss.

    Parameters
    ----------
    hyperparamets : float or 1-element array-like
        Initial learning rate proposed by the swarm optimizer (the optimizer
        passes a 1-D position vector per particle).

    Returns
    -------
    float
        ``1 - R^2`` score on the validation split, so that a swarm
        *minimizing* this value maximizes validation R^2.
    """
    # BUG FIX: sklearn's learning_rate_init must be a plain float, but the
    # swarm hands us a 1-element position array; unwrap it explicitly.
    # (A scalar input still works through the same path.)
    lr = float(np.asarray(hyperparamets).ravel()[0])
    regress = MLPRegressor(
        hidden_layer_sizes=(100,),
        activation='logistic',
        solver='adam',
        batch_size='auto',
        learning_rate_init=lr,
    )
    # Reads module-level t_train / t_target / val_train / val_target.
    regress.fit(t_train, t_target.ravel())
    # R^2 on the held-out split; the original's discarded predict() call
    # was removed (its result was never used).
    score = regress.score(val_train, val_target)
    loss = 1 - score
    print(regress)
    return loss
def f(x):
    """Swarm objective: one MLP loss per particle position.

    ``x`` is a 2-D array of shape (n_particles, dimensions); returns a 1-D
    array holding ``mlp_regress``'s loss for each particle row.
    """
    losses = [mlp_regress(particle) for particle in x]
    return np.array(losses)
# Alternative optimizer: pyswarms GlobalBestPSO (left disabled for reference).
#create bounds for hyperparamets
#min_bound_lr = 0.02
#max_bound_lr = 0.1
#boundslr = (min_bound_lr,max_bound_lr)
# Initialize swarm
#options = {'c1': 0.5 , 'c2': 0.3, 'w':0.9}
# Call instance of PSO
#bounds_lr = (np.array([0.01]),np.array([0.05]))
#dimensions = ( 1 )
#optimizer = ps.single.GlobalBestPSO(n_particles=20, dimensions=dimensions, options=options , bounds=bounds_lr)
# Perform optimization
#best_cost, pos = optimizer.optimize(f , iters=100)

# Run SwarmPackagePy's `ca` optimizer on objective f with 10 agents.
# NOTE(review): positional args appear to be (n_agents, function, lb, ub,
# dimension, iterations) = (10, f, 0.5, 1, 1, 5) with the keyword args being
# algorithm-specific hyperparameters — confirm against SwarmPackagePy docs.
swarm = sp.ca(10, f, 0.5, 1, 1, 5, mr=2, smp=2,
              spc=False, cdc=1, srd=0.1, w=0.1, c=1.05, csi=0.6)
print("best score :", swarm.get_Gbest())
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement