Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
def base_execution1(
    model: object,
    X_train: t.Tensor,
    X_test: t.Tensor,
    y_train: t.Tensor,
    y_test: t.Tensor,
    num_epochs: int = 1000,
    batch_size: int = 30,
    show_plot: bool = True,
) -> tuple:
    """Train *model* with Adam on a mean-squared-error objective.

    Per epoch, accumulates the sum of per-batch RMSE values as the train
    loss; every 5th epoch, computes the test RMSE on (X_test, y_test).

    Args:
        model: any module exposing ``parameters()`` and ``__call__``.
        X_train / y_train: training features and targets.
        X_test / y_test: held-out features and targets.
        num_epochs: number of training epochs (default preserves the
            original hard-coded 1000).
        batch_size: mini-batch size (default preserves the original 30).
        show_plot: when True (default), plot the train-loss curve via
            matplotlib, as the original did.

    Returns:
        (train_losses, test_losses) — train_losses has one entry per
        epoch; test_losses has one entry per evaluation (every 5th epoch).
    """
    optimizer = t.optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.MSELoss(reduction='mean')
    train_data = DataLoader(
        TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True
    )
    train_losses = []
    test_losses = []
    for epoch in range(num_epochs):
        # BUG FIX: the original never restored train mode after calling
        # model.eval(), so layers like dropout stayed in eval mode.
        model.train()
        total_train_loss = 0.0
        # BUG FIX: the original loop was `for X_train, y_train in train_data`,
        # clobbering the full training tensors with the last mini-batch.
        for xb, yb in train_data:
            optimizer.zero_grad()
            preds = model(xb).flatten()
            batch_loss = criterion(preds, yb)
            # Track RMSE per batch (sqrt of the mean-squared batch loss).
            total_train_loss += math.sqrt(batch_loss.item())
            batch_loss.backward()
            optimizer.step()
        train_losses.append(total_train_loss)
        # Evaluate every 5th epoch; no gradients are needed here.
        if epoch % 5 == 0:
            model.eval()
            with t.no_grad():
                # BUG FIX: flatten test predictions like the train ones so
                # MSELoss does not silently broadcast (N, 1) against (N,).
                test_preds = model(X_test).flatten()
                loss_test = criterion(test_preds, y_test)
            # BUG FIX: the original appended to test_losses on EVERY epoch,
            # padding the curve with zeros on the 4-of-5 non-eval epochs.
            test_losses.append(math.sqrt(loss_test.item()))
    if show_plot:
        plt.plot(train_losses)
        plt.show()
    return train_losses, test_losses
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement