Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import torch
- import torch.nn as nn
- import torch.optim as optim
- import torch.nn.functional as functional
- import pandas as pd
- import numpy as np
- import torch.utils.data as data_utils
- from torch.autograd import Variable
- import torch.nn.functional as F
- import math
- import matplotlib.pyplot as plt
class NARX(nn.Module):
    """Nonlinear AutoRegressive network with eXogenous inputs.

    At each step of the input sequence, the current exogenous input is
    concatenated with the (scaled) previous output and pushed through a
    single tanh hidden layer followed by a linear readout.

    Args:
        in_variables: number of exogenous input features per step.
        hidden_size: width of the tanh hidden layer.
        out_size: number of output variables per step.
    """

    def __init__(self, in_variables, hidden_size, out_size):
        super(NARX, self).__init__()
        # The hidden layer sees the exogenous inputs plus the fed-back output.
        self.hidden = nn.Sequential(nn.Linear(in_variables + out_size, hidden_size), nn.Tanh())
        self.out = nn.Linear(hidden_size, out_size)
        self.out_size = out_size

    def forward(self, sequence):
        """Run the sequence through the network and return the final step output.

        The sequence is iterated over its leading dimension; each element is
        assumed to be a (batch, in_variables) slice — TODO confirm layout
        against the caller's DataLoader.
        """
        # Feedback starts at zero for the first step.
        y = torch.zeros((sequence.shape[1], self.out_size))
        for x in sequence:
            # The 0.5 scaling of both input and feedback is kept from the
            # original code.
            step_in = torch.cat((x * 0.5, y * 0.5), dim=1)
            # BUG FIX: the original never reassigned y, so the autoregressive
            # feedback stayed zero forever and the concatenated y*0.5 was a
            # constant. Feed each step's output back as the next step's y.
            y = self.out(self.hidden(step_in))
        # Removed leftover debug prints of tensor shapes.
        return y
# Load the four CSVs used below. From later usage: df and df3 contain a
# 'WLevel' column (targets + features), df2 and df4 contain a 'time' column
# (time axes). The 't' suffix pair (df3/df4) is presumably the test split —
# TODO confirm.
# NOTE(review): hard-coded absolute Windows paths; consider parameterizing.
df = pd.read_csv("C:/Users/Piotrek/Documents/magisterka/dataframe1p.csv", encoding = "ISO-8859-1")
df2 = pd.read_csv("C:/Users/Piotrek/Documents/magisterka/dataframe1s.csv", encoding = "ISO-8859-1")
df3 = pd.read_csv("C:/Users/Piotrek/Documents/magisterka/dataframe1pt.csv", encoding = "ISO-8859-1")
df4 = pd.read_csv("C:/Users/Piotrek/Documents/magisterka/dataframe1st.csv", encoding = "ISO-8859-1")
def chunks(l, n):
    """Yield successive slices of l of length n; the last may be shorter."""
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + n]
        start += n
class DatasetRNN(data_utils.TensorDataset):
    """Dataset of fixed-length sequence windows over (features, target) tensors.

    The feature and target tensors are chopped along dim 0 into consecutive
    windows of ``sequence_length`` rows. Each item pairs one feature window
    with the last target value of the matching target window.
    """

    def __init__(self, features, target, sequence_length=1):
        # Consecutive, non-overlapping windows along the time axis.
        self.features = torch.split(features, sequence_length, dim=0)
        self.target = torch.split(target, sequence_length, dim=0)
        self.seq_length = sequence_length

    def __len__(self):
        # One dataset item per feature window.
        return len(self.features)

    def __getitem__(self, idx):
        window = self.features[idx]
        # Label each window with the final target of that window.
        label = self.target[idx][-1]
        return window, label
# Target column ('WLevel' — presumably water level, TODO confirm) is pulled
# out, then removed from df so df holds only the input features.
WLevel = df['WLevel']
del df['WLevel']
# Wrap training features/targets as length-10 windows; batches of 200
# windows, unshuffled to keep temporal order.
train = DatasetRNN(torch.Tensor(np.array(df)), torch.Tensor(np.array(WLevel)),10)
train_loader = data_utils.DataLoader(train, batch_size = 200, shuffle = False)
# Time axis chopped into 200-element pieces to line up with the batch size.
time = list(chunks(df2['time'],200))
# Test split: same target/feature separation, kept as plain numpy arrays.
WLevel2 = np.array(df3['WLevel'])
del df3['WLevel']
test = np.array(df3)
t2 = np.array(df4['time'])
# Number of exogenous input variables fed to the NARX model.
input_size = 5
def accuracy(model, data_x, data_y, pct_close, tm):
    """Evaluate model predictions against data_y, plot, and return metrics.

    Side effects: draws two figures (prediction vs actual, and residuals
    over tm) and saves them as 'filename2.png' / 'filename21.png'.

    Args:
        model: callable mapping a 2-d input tensor to predictions.
        data_x: model inputs (array-like, convertible via torch.Tensor).
        data_y: true values, 1-d of length n_items.
        pct_close: relative tolerance for counting a prediction "correct".
        tm: x-axis values for the plots (same length as data_y).

    Returns:
        (pct_correct, MSE, MADE, MAPE, VAR) — MSE/MADE/MAPE/VAR as floats.
    """
    n_items = len(data_y)
    X = torch.Tensor(data_x)  # 2-d Tensor
    Y = torch.Tensor(data_y)  # actual as 1-d Tensor
    oupt = model(X)  # all predicted as 2-d Tensor
    # Assumes the model emits one value per item (out_size == 1).
    pred = oupt.view(n_items)  # all predicted as 1-d
    diff = Y - pred

    # Actual (blue) vs predicted (green) over time.
    plt.figure(0)
    plt.plot(tm, Y.detach().numpy(), 'b', linewidth=0.5)
    plt.plot(tm, pred.detach().numpy(), 'g', linewidth=0.5)
    # BUG FIX: the original had a bare `plt.show` (attribute access, never
    # called — a no-op). Save first, then actually call show().
    plt.savefig('filename2.png', dpi=1500)
    plt.show()

    # Residuals over time.
    plt.figure(1)
    plt.plot(tm, diff.detach().numpy(), 'b', linewidth=0.5)
    plt.savefig('filename21.png', dpi=1500)
    plt.show()

    # Share of predictions within pct_close (relative) of the actual value.
    n_correct = torch.sum((torch.abs(pred - Y) < torch.abs(pct_close * Y)))
    result = (n_correct.item() * 100.0 / n_items)  # scalar percentage
    MSE = (torch.sum((Y - pred) ** 2)) / n_items
    MADE = (torch.sum(torch.abs(Y - pred))) / n_items
    # NOTE(review): this divides by the range of Y, not by |Y| — it is a
    # range-normalized error rather than textbook MAPE; name kept as-is.
    MAPE = 100 * (torch.sum((torch.abs(Y - pred)) / (torch.max(Y) - torch.min(Y)))) / n_items
    # Sample variance of the predictions (Bessel-corrected).
    VAR = (torch.sum((pred - torch.mean(pred)) ** 2)) / (n_items - 1)
    return result, MSE.item(), MADE.item(), MAPE.item(), VAR.item()
# Model: 5 exogenous inputs, 7 hidden tanh units, 1 output.
net = NARX(input_size, 7, 1)
optimizer = optim.Adam(lr=0.001, params=net.parameters())
loss_func = torch.nn.MSELoss()  # mean squared error
i = 0
# Single pass over the training batches (one epoch); i tracks the batch
# index so the matching time chunk can be looked up.
for features, target in train_loader:
    optimizer.zero_grad()
    # NOTE(review): features from the loader are (batch, seq_len, n_feat),
    # but NARX.forward iterates the leading dimension as time — verify the
    # intended layout (a transpose may be missing).
    oupt = net(features)
    # NOTE(review): oupt is 2-d while target is 1-d; MSELoss will broadcast
    # them — confirm this is the intended pairing.
    loss_obj = loss_func(oupt, target)
    loss_obj.backward()
    optimizer.step()
    # Time chunk for this batch (raises IndexError if time has fewer
    # chunks than the loader has batches).
    t=time[i]
    i=i+1
    print("batch = %6d" % i, end="")
    print(" batch loss = %7.4f" % loss_obj.item(), end="")
    # Evaluation currently disabled; eval/train toggling kept in place.
    net = net.eval()
    #acc = accuracy(net, features, WLevel, 0.1, t)
    net = net.train()
    #print(acc)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement