Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- #!/usr/bin/python3
- import torch
- from torch import nn
- from torch import optim
- import random
- model = nn.Sequential(
- nn.Linear(2,2),
- nn.Sigmoid(),
- nn.Linear(2,1),
- nn.Sigmoid())
- learning_rate = torch.tensor(0.0001)
- minibatch_size = 25
- criterion = nn.MSELoss()
- #optimizer = optim.SGD(model.parameters(),lr=learning_rate,momentum=0.9)
- optimizer = optim.Adam(model.parameters())
def get_item():
    """Draw one random XOR training example.

    Returns:
        A pair ``(features, target)`` where ``features`` is a float tensor
        of shape ``(2,)`` holding two random bits, and ``target`` is a
        scalar float tensor equal to ``features[0] XOR features[1]``.
    """
    bit_a = random.randint(0, 1)
    bit_b = random.randint(0, 1)
    features = torch.tensor([bit_a, bit_b], dtype=torch.float)
    target = torch.tensor(float(bit_a ^ bit_b))
    return features, target
def _train(num_steps=100000):
    """Train the XOR model for ``num_steps`` Adam steps on random minibatches.

    Uses the module-level ``model``, ``criterion``, ``optimizer``,
    ``minibatch_size`` and ``get_item``. Prints the loss once every
    1000 steps (the original printed every step, flooding stdout).
    """
    for step in range(num_steps):
        optimizer.zero_grad()

        # Assemble one minibatch of random XOR examples.
        # BUG FIX: the original used range(1, minibatch_size), which
        # silently produced minibatch_size - 1 examples per batch.
        inputs = []
        targets = []
        for _ in range(minibatch_size):
            x, y_expected = get_item()
            inputs.append(x)
            targets.append(y_expected)
        xb = torch.stack(inputs)    # shape (minibatch_size, 2)
        yb = torch.stack(targets)   # shape (minibatch_size,)

        # Forward pass: (minibatch_size, 1) -> (minibatch_size,) to match yb.
        predictions = torch.squeeze(model(xb))
        loss = criterion(predictions, yb)

        if step % 1000 == 0:
            print(loss)

        loss.backward()
        optimizer.step()


if __name__ == "__main__":
    _train()
Advertisement
Add Comment
Please sign in to add a comment.