A minimal reproduction of the error: CrossEntropyLoss is called on predicted class indices instead of the model's logits.

import torch
import torch.nn as nn

predicted = torch.tensor([4, 4, 4, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 4, 1, 1, 4, 0, 4, 4, 1, 4, 1])
target = torch.tensor([3, 0, 0, 1, 1, 0, 1, 1, 1, 3, 2, 4, 1, 1, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1])
loss = nn.CrossEntropyLoss()
computed_loss = loss(predicted, target)
# IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
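The call fails because nn.CrossEntropyLoss expects raw, unnormalized logits of shape (N, C) as its first argument and int64 class labels of shape (N,) as its second; a 1-D tensor of argmax indices satisfies neither role. A sketch of a well-formed call for the same 24 samples and 5 classes (the random logits here are an illustrative assumption):

import torch
import torch.nn as nn

loss = nn.CrossEntropyLoss()
logits = torch.randn(24, 5)  # raw model outputs: N=24 samples, C=5 classes, float
target = torch.tensor([3, 0, 0, 1, 1, 0, 1, 1, 1, 3, 2, 4, 1, 1, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1])
computed_loss = loss(logits, target)  # 0-dim tensor, differentiable w.r.t. logits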
The full traceback from the training run:

---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-208-3cdb253d6620> in <module>
      1 batch_size = 1000
      2 train_class = Train((training_set.shape[1]-1), number_of_target_labels, 0.01, 1000)
----> 3 train_class.train_model(training_set, batch_size)

<ipython-input-207-f3e2c7f7979a> in train_model(self, training_data, n_iters)
     42                 out = self.model(x)
     43                 _, predicted = torch.max(out.data, 1)
---> 44                 loss = self.criterion(predicted, y)
     45                 self.optimizer.zero_grad()
     46                 loss.backward()

/anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    491             result = self._slow_forward(*input, **kwargs)
    492         else:
--> 493             result = self.forward(*input, **kwargs)
    494         for hook in self._forward_hooks.values():
    495             hook_result = hook(self, input, result)

/anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
    940     def forward(self, input, target):
    941         return F.cross_entropy(input, target, weight=self.weight,
--> 942                                ignore_index=self.ignore_index, reduction=self.reduction)
    943
    944

/anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
   2054     if size_average is not None or reduce is not None:
   2055         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2056     return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
   2057
   2058

/anaconda3/envs/malicious_ml/lib/python3.6/site-packages/torch/nn/functional.py in log_softmax(input, dim, _stacklevel, dtype)
   1348         dim = _get_softmax_dim('log_softmax', input.dim(), _stacklevel)
   1349     if dtype is None:
-> 1350         ret = input.log_softmax(dim)
   1351     else:
   1352         ret = input.log_softmax(dim, dtype=dtype)

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
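The last frame shows the root cause: cross_entropy calls log_softmax(input, 1), and a 1-D input only has dimension 0 (equivalently -1), so dimension 1 is out of range. A small demonstration of that boundary:

import torch

t = torch.tensor([4., 4., 1.])   # 1-D tensor: the only valid dims are 0 and -1
torch.log_softmax(t, dim=0)      # fine
# torch.log_softmax(t, dim=1)    # raises IndexError: Dimension out of range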
The code that produced the traceback:

import torch
import torch.nn as nn
from torch.autograd import Variable  # note: Variable is a no-op wrapper in PyTorch >= 0.4


class LogisticRegressionModel(nn.Module):
    def __init__(self, in_dim, num_classes):
        super().__init__()
        self.linear = nn.Linear(in_dim, num_classes)

    def forward(self, x):
        return self.linear(x)


class Train(LogisticRegressionModel):
    def __init__(self, in_dim, num_classes, lr, batch_size):
        super().__init__(in_dim, num_classes)
        self.batch_size = batch_size
        self.learning_rate = lr
        self.input_layer_dim = in_dim
        self.output_layer_dim = num_classes
        self.criterion = nn.CrossEntropyLoss()
        self.model = LogisticRegressionModel(self.input_layer_dim, self.output_layer_dim)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(self.device)
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

    def epochs(self, iterations, train_dataset, batch_size):
        epochs = int(iterations / (len(train_dataset) / batch_size))
        return epochs

    def train_model(self, training_data, n_iters):
        batch = self.batch_size
        epochs = self.epochs(n_iters, training_data, batch)
        training_data = torch.utils.data.DataLoader(dataset=training_data, batch_size=batch, shuffle=True)
        for epoch in range(epochs):
            for i, data in enumerate(training_data):
                X_train = data[:, :-1]  # features: all but the last column
                Y_train = data[:, -1]   # labels: the last column
                if torch.cuda.is_available():
                    x = Variable(torch.Tensor(X_train).cuda())
                    y = Variable(torch.Tensor(Y_train).cuda())
                else:
                    x = Variable(torch.Tensor(X_train.float()))
                    y = Variable(torch.Tensor(Y_train.float()))
                out = self.model(x)
                _, predicted = torch.max(out.data, 1)
                loss = self.criterion(predicted, y)  # <-- line 44 of the traceback: argmax indices, not logits
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if i % 100 == 0:
                    print('[{}/{}] Loss: {:.6f}'.format(epoch + 1, epochs, loss))
predicted = torch.tensor([[1, 2, 3, 4]]).float()  # a 2-D float tensor: the input shape CrossEntropyLoss actually expects
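A minimal sketch of the fix, under the assumptions of the loop above (features in all but the last column, integer class labels in the last column): pass the raw logits to the loss, cast the targets to int64, and take the argmax only for reporting. The stand-in linear model and the fake batch below are illustrative assumptions, not the original data:

import torch
import torch.nn as nn

model = nn.Linear(3, 5)                          # stand-in for LogisticRegressionModel(in_dim=3, num_classes=5)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

data = torch.randn(8, 4)                         # fake batch: 3 feature columns + 1 label column
data[:, -1] = torch.randint(0, 5, (8,)).float()  # labels 0..4 stored in the last column

x = data[:, :-1].float()                         # features as float32
y = data[:, -1].long()                           # labels as int64, shape (batch,)

out = model(x)                                   # raw logits, shape (batch, num_classes)
loss = criterion(out, y)                         # logits + integer labels, not argmax indices
optimizer.zero_grad()
loss.backward()                                  # gradient flows back through out
optimizer.step()

_, predicted = torch.max(out, 1)                 # argmax only for accuracy reporting

Even with matching shapes, the original call could not train: the argmax indices from torch.max(out.data, 1) are integers detached from the graph, so no gradient would ever reach the model weights.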