Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# %%
import os
from os import mkdir
from shutil import copyfile, copytree

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch as th
import torch.nn as nn
from torchvision import datasets, transforms
from tqdm import tqdm
# %%
# Training images: converted to tensors, then randomly cropped to 50x50
# with 4 pixels of padding as light augmentation.
train_transformation = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomCrop(50, 4),
])
# NOTE(review): hard-coded absolute Windows path; ImageFolder expects one
# sub-directory per class under this root — confirm the folder layout.
train_dataset = datasets.ImageFolder(
    root="C:\\Users\\preda\\Desktop\\Github\\Facultate\\Anul 2\\IA\\train_folder",
    transform=train_transformation,
)
train_loader = th.utils.data.DataLoader(
    train_dataset, batch_size=64, shuffle=True
)
# Validation images: tensor conversion only — no augmentation.
validation_transformation = transforms.Compose([
    transforms.ToTensor(),
])
validation_dataset = datasets.ImageFolder(
    root="C:\\Users\\preda\\Desktop\\Github\\Facultate\\Anul 2\\IA\\validation_folder",
    transform=validation_transformation,
)
# shuffle=True is harmless but unnecessary for evaluation-only data.
validation_loader = th.utils.data.DataLoader(
    validation_dataset, batch_size=64, shuffle=True
)
# %%
class Network(nn.Module):
    """Small CNN classifier for 3-class, 3-channel 50x50 images.

    Two conv/ReLU/BatchNorm/MaxPool stages (3 -> 32 -> 64 channels,
    spatial 50 -> 25 -> 12) feed a 3-layer MLP head with dropout.
    """

    def __init__(self):
        super().__init__()
        stages = [
            # Stage 1: 3 -> 32 channels, 50x50 -> 25x25.
            nn.Conv2d(3, 32, 3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.MaxPool2d(2),
            # Stage 2: 32 -> 64 channels, 25x25 -> 12x12 (floor division).
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(2),
            # Classifier head on the flattened 64*12*12 feature map.
            nn.Flatten(),
            nn.Linear(64 * 12 * 12, 1000),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(1000, 200),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(200, 3),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        """Return raw class logits of shape (batch, 3)."""
        return self.net(x)
# %%
def train_one_epoch():
    """Run one pass over ``train_loader`` and return the average batch loss.

    Uses the module-level ``net``, ``criterion``, ``optimizer`` and
    ``train_loader``; moves each batch to the GPU before the forward pass.
    """
    net.train()
    avg_loss = 0.
    for images, labels in tqdm(train_loader):
        images = images.cuda()
        labels = labels.cuda()
        predictions = net(images)
        loss = criterion(predictions, labels)
        avg_loss += loss.item()
        # BUG FIX: gradients must be cleared each step; without
        # zero_grad() they accumulate across batches and corrupt updates.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return avg_loss / len(train_loader)
def test_data(data_loader):
    """Evaluate the global ``net`` on ``data_loader``.

    Returns ``(accuracy, average_batch_loss)``; the accuracy is a 0-dim
    tensor because the hit count is accumulated with ``th.sum``.
    """
    net.eval()
    correct = 0
    seen = 0
    loss_sum = 0.
    with th.no_grad():
        for batch_images, batch_labels in tqdm(data_loader):
            batch_images = batch_images.cuda()
            batch_labels = batch_labels.cuda()
            outputs = net(batch_images)
            loss_sum += criterion(outputs, batch_labels).item()
            correct += th.sum(batch_labels == outputs.argmax(dim=1))
            seen += len(batch_images)
    return (correct / seen, loss_sum / len(data_loader))
def print_confussion_matrix(data_loader):
    """Show and print a 3x3 confusion matrix for ``net`` on ``data_loader``.

    Rows are true labels, columns are predicted labels.
    NOTE(review): the name's "confussion" typo is kept so existing
    callers keep working.
    """
    counts = [[0] * 3 for _ in range(3)]
    with th.no_grad():
        for images, labels in tqdm(data_loader):
            images = images.cuda()
            labels = labels.cuda()
            guesses = net(images).argmax(dim=1)
            for truth, guess in zip(labels, guesses):
                counts[truth.item()][guess.item()] += 1
    plt.imshow(counts, cmap='gray')
    print(np.array(counts))
# %%
# Model, loss, and a history list for per-epoch training losses.
net = Network().cuda()
criterion = nn.CrossEntropyLoss().cuda()
loss_hist = []
# %%
# Train for 20 epochs with Adam; after each epoch report the training
# loss and the validation (accuracy, loss) tuple from test_data().
optimizer = th.optim.Adam(net.parameters(), lr=1e-5)
for _epoch in range(20):
    epoch_loss = train_one_epoch()
    print(f"Loss: {epoch_loss}")
    print(f"Accuracy: {test_data(validation_loader)}")
    loss_hist.append(epoch_loss)
# %%
# Test images: same deterministic transform as validation.
# batch_size=1 and shuffle=False preserve the dataset's sample order so
# each prediction can be mapped back to its file.
test_dataset = datasets.ImageFolder(
    root="C:\\Users\\preda\\Desktop\\Github\\Facultate\\Anul 2\\IA\\test_folder",
    transform=validation_transformation,
)
test_loader = th.utils.data.DataLoader(
    test_dataset, batch_size=1, shuffle=False
)
# %%
def print_test():
    """Write "id,label" predictions for the test set to solution.txt.

    Relies on the module-level ``net`` and ``test_loader`` (batch size 1,
    unshuffled), pairing the i-th loader sample with the i-th filename.

    BUG FIX: ``os.listdir`` returns files in arbitrary order, while
    torchvision's ImageFolder sorts its samples — the listing is now
    sorted so each written filename matches the image that produced the
    prediction. The output file is also opened with ``with`` so it is
    closed even if an exception is raised mid-loop.
    """
    test_dir = "C:\\Users\\preda\\Desktop\\Github\\Facultate\\Anul 2\\IA\\test_folder\\test"
    tests = sorted(os.listdir(test_dir))
    net.eval()
    with open("C:\\Users\\preda\\Desktop\\Github\\Facultate\\Anul 2\\IA\\solution.txt", "w") as f:
        f.write("id,label\n")
        with th.no_grad():
            for index, (images, labels) in enumerate(tqdm(test_loader)):
                images = images.cuda()
                predictions = net(images)
                prediction = predictions.argmax(dim=1)
                f.write(tests[index] + "," + str(prediction.item()) + "\n")
- # %%
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement