import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
NUM_CLASSES = 2  # assumption: the paste never states a class count, and layer_1's definition was missing

class NeuralNetworkCalculator(nn.Module):
    def __init__(self):
        super(NeuralNetworkCalculator, self).__init__()
        # 32 input features (matching the data below) -> 3 hidden units -> class logits
        self.layer_1 = nn.Linear(32, 3)
        self.layer_2 = nn.Linear(3, NUM_CLASSES)

    def forward(self, x):
        x = F.relu(self.layer_1(x))
        # no activation on the output: CrossEntropyLoss expects raw logits
        return self.layer_2(x)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = NeuralNetworkCalculator().to(device)
criterion = nn.CrossEntropyLoss()  # applies log-softmax internally, so the net returns raw logits
optimizer = optim.Adam(net.parameters(), lr=0.0001)
##############################
# REPLACE WITH YOUR OWN DATA #
##############################
# 1042 samples with 32 features each; the original shape comment
# ("samples, channels, height, width") did not match this 2D tensor.
x = torch.from_numpy(np.random.rand(1042, 32)).float().to(device)  # (samples, features)
# CrossEntropyLoss wants one integer class index per sample; the original
# randint(1, ...) produced all zeros in a (samples, 32) shape.
y = torch.from_numpy(np.random.randint(NUM_CLASSES, size=(1042,))).long().to(device)  # (samples,)
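
# Mini-batching sketch (not in the original paste, shown as an assumption):
# the same tensors can be wrapped in a TensorDataset/DataLoader instead of
# iterating one sample at a time. Left unused so the loop below stays as written.
from torch.utils.data import TensorDataset, DataLoader
loader = DataLoader(TensorDataset(x, y), batch_size=32, shuffle=True)  # batch size is an arbitrary choice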
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i in range(x.shape[0]):
        # get the inputs; unsqueeze adds the batch dimension the layers expect
        inputs, labels = x[i].unsqueeze(0), y[i].unsqueeze(0)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics every 200 samples
        running_loss += loss.item()
        if i % 200 == 199:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 200))
            running_loss = 0.0

print('Finished Training')
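
# Inference sketch (not in the original paste): switch to eval mode, disable
# gradient tracking, and take the argmax over the class logits. The sample
# below is placeholder data with the assumed 32 features.
net.eval()
with torch.no_grad():
    sample = torch.rand(1, 32, device=device)
    predicted = net(sample).argmax(dim=1)
    print('Predicted class:', predicted.item())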