import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np

class NeuralNetworkCalculator(nn.Module):
    def __init__(self):
        super(NeuralNetworkCalculator, self).__init__()
        # assumed: 32 input features to match the dummy data below, 3 hidden units feeding layer_2
        self.layer_1 = torch.nn.Linear(32, 3)
        # a single output unit makes CrossEntropyLoss degenerate (only one class);
        # for real data, set out_features to the number of classes
        self.layer_2 = torch.nn.Linear(3, 1)

    def forward(self, x):
        x = F.relu(self.layer_1(x))
        # return raw logits: CrossEntropyLoss applies log-softmax internally,
        # so no activation on the final layer
        return self.layer_2(x)

net = NeuralNetworkCalculator()

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net.to(device)

##############################
# REPLACE WITH YOUR OWN DATA #
##############################
x = torch.from_numpy(np.random.rand(1042, 32)).type(dtype=torch.float).to(device)  # (samples, features)
# assumed: one integer class label per sample, as CrossEntropyLoss expects;
# all labels are 0 here because randint(1) only draws 0
y = torch.from_numpy(np.random.randint(1, size=(1042,))).to(device)

for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i in range(0, x.shape[0]):
        # get the inputs, adding a batch dimension of 1
        inputs, labels = x[i].unsqueeze(0), y[i].unsqueeze(0)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 200 == 199:  # print every 200 samples
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 200))
            running_loss = 0.0

print('Finished Training')
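
A minimal inference sketch (an assumption, not part of the original paste): after training, the same dummy tensor x can be pushed through the network in one batch under torch.no_grad() and the predicted class read off with argmax.

# --- optional: quick inference check on the dummy data (sketch) ---
net.eval()
with torch.no_grad():
    logits = net(x)                     # (1042, 1) logits for the whole dummy set
    predictions = logits.argmax(dim=1)  # predicted class index per sample
print(predictions[:10])  # with a single output unit every prediction is class 0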