Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torch.autograd import Variable
- import torch.optim as optim
- import PIL.Image as Image
- import random
- import os
- import torchvision.transforms as transforms
- import time
# Image preprocessing pipeline applied to every training/test image.
# NOTE(review): this assignment shadows the imported `torchvision.transforms`
# module; the data-loading loop below relies on calling `transforms(img)`,
# so renaming it would require touching that code as well.
transforms = transforms.Compose([
    # Resize so the smaller image edge becomes 200 px (aspect ratio kept).
    transforms.Resize(200),
    transforms.ToTensor()#,
    # Normalization was tried and disabled by the author:
    #transforms.Normalize(mean = [0.3, 0.4,0.2], std = [0.2, 0.3, 0.2]) # original note: combined values must stay below 1
])
def concatenate(list1, list2):
    """Return a new list with the items of ``list1`` followed by ``list2``.

    Equivalent to ``list1 + list2`` for lists, but accepts any iterables.
    Neither input is modified.
    """
    # Unpacking builds the combined list in one step, replacing the manual
    # append loop (which also shadowed the builtin name `list`).
    return [*list1, *list2]
# ---------------------------------------------------------------------------
# Dataset construction: images live in <root>/training_fake and
# <root>/training_real.  Each image is randomly assigned to the training or
# the test split (50/50 coin flip), labelled 1 for "fake" and 0 for "real",
# and grouped into batches of `batch_size`.
# ---------------------------------------------------------------------------
root = "/home/ich/Desktop/NN-UEbung/dataset"
image_paths = os.listdir(root + "/training_fake")
image_paths = concatenate(image_paths, os.listdir(root + "/training_real"))

train_data_list = []    # image tensors of the train batch currently filling
test_data_list = []     # image tensors of the test batch currently filling
test_data = []          # finished test batches: (stacked tensor, label list)
train_data = []         # finished train batches: (stacked tensor, label list)
target_train_list = []  # labels of the train batch currently filling
target_test_list = []   # labels of the test batch currently filling
batch_size = 4
batch = 0

# One shuffle replaces the original random.choice()/remove() pattern, which
# was O(n^2); the visiting order is still uniformly random.
random.shuffle(image_paths)
for img_path in image_paths:
    # Label 1 = fake, 0 = real; the source folder follows from the label.
    label = 1 if "fake" in img_path else 0
    folder = "/training_fake/" if label == 1 else "/training_real/"
    img = transforms(Image.open(root + folder + img_path))
    # Coin flip: 1 -> training split, 0 -> test split.
    if random.choice([0, 1]) == 1:
        target_train_list.append(label)
        train_data_list.append(img)
    else:
        target_test_list.append(label)
        test_data_list.append(img)
    # Emit a batch as soon as batch_size samples have accumulated.
    if len(train_data_list) >= batch_size:
        train_data.append((torch.stack(train_data_list), target_train_list))
        train_data_list = []
        target_train_list = []
        batch += 1
        print("Batch Nr. " + str(batch))
    if len(test_data_list) >= batch_size:
        test_data.append((torch.stack(test_data_list), target_test_list))
        test_data_list = []
        target_test_list = []
        batch += 1
        print("Batch Nr. " + str(batch))

# Bug fix: the original loop silently dropped up to batch_size - 1 trailing
# samples per split; flush the incomplete final batches instead.
if train_data_list:
    train_data.append((torch.stack(train_data_list), target_train_list))
if test_data_list:
    test_data.append((torch.stack(test_data_list), target_test_list))
class Netz(nn.Module):
    """Small CNN binary classifier (fake vs. real face images).

    Three conv + maxpool + ReLU stages feed two fully connected layers; the
    final sigmoid yields one probability per input image.

    NOTE(review): the flatten size 24696 = 14 * 21 * 84 hard-codes the
    post-conv spatial size, so inputs must have a matching resolution
    (e.g. 3 x 200 x 702) -- confirm against what the resize pipeline
    actually produces for this dataset.
    """

    def __init__(self):
        super(Netz, self).__init__()
        self.conv1 = nn.Conv2d(3, 5, kernel_size=5)
        self.conv2 = nn.Conv2d(5, 8, kernel_size=5)
        self.conv3 = nn.Conv2d(8, 14, kernel_size=5)
        #self.conv4 = nn.Conv2d(18, 24, kernel_size = 3)
        self.fc1 = nn.Linear(24696, 1000)
        # Bug fix: the head must emit ONE logit per sample.  The original
        # nn.Linear(1000, batch_size) produced a (N, batch_size) output that
        # can never match the (N,)-shaped target in binary_cross_entropy,
        # and it tied the architecture to the data loader's batch size.
        self.fc2 = nn.Linear(1000, 1)

    def forward(self, x):
        """Map an image batch (N, 3, H, W) to sigmoid probabilities (N, 1)."""
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = F.relu(F.max_pool2d(self.conv3(x), 2))
        #x = F.relu(F.max_pool2d(self.conv4(x), 2))
        x = x.view(-1, 24696)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return torch.sigmoid(x)
# Instantiate the network; if a checkpoint exists on disk, load it instead.
model = Netz()
#if torch.cuda.is_available():
#    model = model.cuda()
#    print("Netz auf CUDA verschoben!")
if os.path.isfile("nn.pt"):
    # SECURITY NOTE(review): torch.load unpickles arbitrary objects -- only
    # load checkpoint files from trusted sources.
    model = torch.load("nn.pt")
    print("File nn.pt loaded!")
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Anomaly detection helps localize NaN/inf gradients, at extra runtime cost.
torch.autograd.set_detect_anomaly(True)
def train(epoch):
    """Run one training epoch over all prepared batches and checkpoint.

    Parameters
    ----------
    epoch : int
        Current epoch number (used only for progress output).

    Side effects: updates the global `model` in place via `optimizer`,
    prints per-batch progress, and saves the whole model to "nn.pt".
    """
    model.train()
    # Hoisted out of the loop: the loss function never changes per batch.
    criterion = F.binary_cross_entropy
    batch_id = 1
    for data, target in train_data:
        # Labels arrive as a plain Python list; BCE needs a float tensor.
        # (The deprecated Variable wrapper was dropped -- tensors carry
        # autograd state themselves since PyTorch 0.4, so behavior is
        # unchanged.)
        target = torch.Tensor(target)
        #if torch.cuda.is_available():
        #    data = data.cuda()
        #    target = target.cuda()
        #    print("Daten auf CUDA verschoben!")
        optimizer.zero_grad()
        out = model(data)
        loss = criterion(out.squeeze(), target)
        loss.backward()
        optimizer.step()
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} (Batch-ID: {})'.format(epoch, (batch_id - 1) * batch_size, len(train_data) * batch_size, 100. * batch_id / len(train_data), loss.item(), batch_id))
        batch_id += 1
        # NOTE(review): the flat paste makes the original indentation of this
        # save ambiguous; it is kept inside the loop (checkpoint per batch).
        # Saving once per epoch would be much cheaper on I/O -- confirm intent.
        torch.save(model, "nn.pt")
def test():
    """Evaluate the model on the held-out test batches and print mean loss.

    Side effects: switches `model` to eval mode and prints the average
    binary cross-entropy over `test_data`.
    """
    model.eval()
    total_loss = 0.0
    # Bug fix: the original iterated over `train_data` while dividing by
    # len(test_data), so the reported "test" loss was computed on training
    # batches and mis-scaled.  Evaluate on `test_data` instead.
    with torch.no_grad():  # gradients are not needed for evaluation
        for data, target in test_data:
            target = torch.Tensor(target)
            #if torch.cuda.is_available():
            #    data = data.cuda()
            #    target = target.cuda()
            #    print("Daten auf CUDA verschoben!")
            out = model(data)
            total_loss += F.binary_cross_entropy(out.squeeze(), target).item()
            #torch.cuda.empty_cache()
    # Guard against an empty test split instead of dividing by zero.
    if test_data:
        print("Average loss: " + str(total_loss / len(test_data)))
    else:
        print("Average loss: n/a (no test batches)")
# Main loop: 30 epochs, each training pass followed by an evaluation pass.
for epoch in range(1, 31):
    train(epoch)
    test()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement