Advertisement
Guest User

Untitled

a guest
Jul 16th, 2019
143
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 4.77 KB | None | 0 0
  1. predicted = torch.tensor([4, 4, 4, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 4, 1, 1, 4, 0, 4, 4, 1, 4, 1])
  2.  
  3. target = torch.tensor([3, 0, 0, 1, 1, 0, 1, 1, 1, 3, 2, 4, 1, 1, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1,])
  4.  
  5. loss = nn.CrossEntropyLoss()
  6. computed_loss = loss(predicted, target)
  7.  
  8. IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
  9.  
  10. -----------------------------------------------------------
  11. IndexError Traceback (most recent call last)
  12. <ipython-input-208-3cdb253d6620> in <module>
  13. 1 batch_size = 1000
  14. 2 train_class = Train((training_set.shape[1]-1), number_of_target_labels, 0.01, 1000)
  15. ----> 3 train_class.train_model(training_set, batch_size)
  16.  
  17. <ipython-input-207-f3e2c7f7979a> in train_model(self, training_data, n_iters)
  18. 42 out = self.model(x)
  19. 43 _, predicted = torch.max(out.data, 1)
  20. ---> 44 loss = self.criterion(predicted, y)
  21. 45 self.optimizer.zero_grad()
  22. 46 loss.backward()
  23.  
  24. /anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
  25. 491 result = self._slow_forward(*input, **kwargs)
  26. 492 else:
  27. --> 493 result = self.forward(*input, **kwargs)
  28. 494 for hook in self._forward_hooks.values():
  29. 495 hook_result = hook(self, input, result)
  30.  
  31. /anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
  32. 940 def forward(self, input, target):
  33. 941 return F.cross_entropy(input, target, weight=self.weight,
  34. --> 942 ignore_index=self.ignore_index, reduction=self.reduction)
  35. 943
  36. 944
  37.  
  38. /anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
  39. 2054 if size_average is not None or reduce is not None:
  40. 2055 reduction = _Reduction.legacy_get_string(size_average, reduce)
  41. -> 2056 return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
  42. 2057
  43. 2058
  44.  
  45. /anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/functional.py in log_softmax(input, dim, _stacklevel, dtype)
  46. 1348 dim = _get_softmax_dim('log_softmax', input.dim(), _stacklevel)
  47. 1349 if dtype is None:
  48. -> 1350 ret = input.log_softmax(dim)
  49. 1351 else:
  50. 1352 ret = input.log_softmax(dim, dtype=dtype)
  51.  
  52. import torch
  53. import torch.nn as nn
  54. from torch.autograd import Variable
  55.  
  56.  
  57. class LogisticRegressionModel(nn.Module):
  58.  
  59. def __init__(self, in_dim, num_classes):
  60. super().__init__()
  61. self.linear = nn.Linear(in_dim, num_classes)
  62.  
  63. def forward(self, x):
  64. return self.linear(x)
  65.  
  66.  
  67. class Train(LogisticRegressionModel):
  68.  
  69. def __init__(self, in_dim, num_classes, lr, batch_size):
  70. super().__init__(in_dim, num_classes)
  71. self.batch_size = batch_size
  72. self.learning_rate = lr
  73. self.input_layer_dim = in_dim
  74. self.output_layer_dim = num_classes
  75. self.criterion = nn.CrossEntropyLoss()
  76. self.model = LogisticRegressionModel(self.input_layer_dim, self.output_layer_dim)
  77. self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  78. self.model = self.model.to(self.device)
  79. self.optimizer = torch.optim.SGD(self.model.parameters(), lr = self.learning_rate)
  80.  
  81. def epochs(self, iterations, train_dataset, batch_size):
  82. epochs = int(iterations/(len(train_dataset)/batch_size))
  83. return epochs
  84.  
  85. def train_model(self, training_data, n_iters):
  86. batch = self.batch_size
  87. epochs = self.epochs(n_iters, training_data, batch)
  88. training_data = torch.utils.data.DataLoader(dataset = training_data, batch_size = batch, shuffle = True)
  89.  
  90. for epoch in range(epochs):
  91.  
  92. for i, data in enumerate(training_data):
  93.  
  94. X_train = data[:, :-1]
  95. Y_train = data[:, -1]
  96.  
  97. if torch.cuda.is_available():
  98. x = Variable(torch.Tensor(X_train).cuda())
  99. y = Variable(torch.Tensor(Y_train).cuda())
  100.  
  101. else:
  102. x = Variable(torch.Tensor(X_train.float()))
  103. y = Variable(torch.Tensor(Y_train.float()))
  104.  
  105. out = self.model(x)
  106. _, predicted = torch.max(out.data, 1)
  107. loss = self.criterion(predicted, y)
  108. self.optimizer.zero_grad()
  109. loss.backward()
  110. self.optimizer.step()
  111.  
  112. if i % 100 == 0:
  113. print('[{}/{}] Loss: {:.6f}'.format(epoch + 1, epochs, loss))
  114.  
  115. predicted = torch.tensor([[1,2,3,4]]).float()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement