Advertisement
Guest User

Untitled

a guest
Dec 12th, 2019
138
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 2.55 KB | None | 0 0
  1. # -*- coding: utf-8 -*-
  2. """
  3. Created on Tue Dec 10 11:08:24 2019
  4.  
  5. @author: ShardeR
  6. """
  7.  
  8.  
  9.  
  10.  
  11. import pandas as pd
  12. import numpy as np
  13. from sklearn.preprocessing import MinMaxScaler
  14. #from keras.models import Sequential
  15. from sklearn.neural_network import MLPRegressor
  16. from sklearn.model_selection import train_test_split
  17. from pyswarms.utils.plotters import plot_cost_history, plot_contour, plot_surface
  18. #from keras.layers import Dense, Dropout, LSTM
  19. import matplotlib.pyplot as plt
  20. import pyswarms as ps
  21. import SwarmPackagePy as sp
  22. from SwarmPackagePy import testFunctions as tf
  23.  
  24.  
  25.  
  26. df = pd.read_csv('aadr.us.txt', names=["Data","Open","High","Low","Close"], usecols=[0,1,2,3,4] , parse_dates=True, index_col=0)
  27.  
  28.  
  29.  
  30. t_train, val_train, t_target, val_target = train_test_split(df, df['Open'] , test_size=0.2 , shuffle=False)
  31.  
  32.  
  33. t_target = np.array(t_target[1:])
  34. val_train =  np.array(val_train[0:(len(val_train)-1)])
  35. t_train = np.array(t_train[0:(len(t_train)-1)])
  36. val_target = np.array(val_target[1:])
  37.  
  38. t_target = np.reshape(t_target,(len(t_target),1))
  39.  
  40. val_target = np.reshape(val_target,(len(val_target),1))
  41.  
  42.  
  43. scaler = MinMaxScaler(feature_range=(0, 1))
  44. t_target = scaler.fit_transform(t_target)
  45. val_train = scaler.fit_transform(val_train)
  46. t_train = scaler.fit_transform(t_train)
  47. val_target = scaler.fit_transform(val_target)
  48.  
  49.  
  50.  
  51.  
  52. #activation func  --- tanh , identity , logistic , relu
  53. #solver ---- sgd , lbfgs , adam
  54.  
  55. def mlp_regress(hyperparamets):
  56.     regress = MLPRegressor(hidden_layer_sizes=(100,) , activation='logistic', solver='adam', batch_size='auto', learning_rate_init=(hyperparamets))
  57.     regress.fit(t_train,t_target.ravel())
  58.     regress.predict(val_train)
  59.     score = regress.score(val_train,val_target)
  60.     loss = (1-score)
  61.     print(regress)
  62.     return loss
  63.  
  64. def f(x):
  65.     n_particles = x.shape[0]
  66.     j = [mlp_regress(x[i]) for i in range(n_particles)]
  67.     return np.array(j)
  68.  
  69. #create bounds for hyperparamets
  70.  
  71. #min_bound_lr = 0.02
  72. #max_bound_lr = 0.1
  73. #boundslr = (min_bound_lr,max_bound_lr)
  74.  
  75. # Initialize swarm
  76. #options = {'c1': 0.5 , 'c2': 0.3, 'w':0.9}
  77. # Call instance of PSO
  78. #bounds_lr = (np.array([0.01]),np.array([0.05]))
  79. #dimensions = ( 1 )
  80. #optimizer = ps.single.GlobalBestPSO(n_particles=20, dimensions=dimensions, options=options , bounds=bounds_lr)
  81.  
  82. # Perform optimization
  83. #best_cost, pos = optimizer.optimize(f , iters=100)
  84.  
  85.  
# Run SwarmPackagePy's cat-swarm optimizer over the learning rate.
# Positional args appear to be: 10 agents, objective f, bounds 0.5..1,
# dimension 1, 5 iterations -- TODO confirm against the sp.ca signature.
# NOTE(review): f is written to take the whole particle matrix and return
# a vector (pyswarms style), but SwarmPackagePy optimizers typically call
# the objective once per agent -- verify which calling convention sp.ca
# uses before trusting these results.
alh = sp.ca(10, f , 0.5 , 1 , 1 ,5, mr=2, smp=2,
                 spc=False, cdc=1, srd=0.1, w=0.1, c=1.05, csi=0.6)

# NOTE(review): get_Gbest() presumably returns the best *position*
# (the tuned learning rate), not a score -- the label is misleading.
print ("best score :",alh.get_Gbest())
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement