Advertisement
Not a member of Pastebin yet?
Sign Up — it unlocks many cool features!
"""One-shot training script: load a pickled DataFrame of dependency-tree
examples, build tensors for the FIRST example only, and run a single
optimization step of the project's graph_conv sentiment model on CPU.
"""
import pickle
import time

import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F

from models import graph_conv

# NOTE(security): pickle.load executes arbitrary code embedded in the file —
# only load pickle files produced by trusted code.
# `with` guarantees the file handle is closed (the original leaked it).
with open('DrugsCom_review_positive_short_ALL_10stars_15tokens_DEP_TREE.pkl', 'rb') as infile:
    data_train = pickle.load(infile)

# Per-example columns of the loaded DataFrame (presumably pandas —
# `.to_list()` on attribute-accessed columns; confirm against the pickle).
dependency_tree = data_train.dependency_tree_clean.to_list()
no_of_tokens = data_train.no_of_tokens.to_list()
sosy_idx = data_train.SOSY_index.to_list()
sentiment_labels = data_train.sentiment.to_list()

embedding_size = 100

# Map sentiment labels {-1, 1} -> class indices {0, 1}.
# The original printed a message and silently SKIPPED any unexpected label,
# which would misalign target_list with the other per-row columns; fail
# loudly instead so the data problem is caught immediately.
target_list = []
for label in sentiment_labels:
    if label == -1:
        target_list.append(0)
    elif label == 1:
        target_list.append(1)
    else:
        raise ValueError(f'Sentiment labels must be -1 or 1, got {label!r}.')
target = torch.tensor(target_list, dtype=torch.long)  # cross_entropy wants int64

# Inputs for the first example only.
numpy.random.seed(20)  # reproducible random node embeddings
edges = torch.as_tensor(dependency_tree[0], dtype=torch.int64)
vertices = torch.as_tensor(
    numpy.random.rand(no_of_tokens[0], embedding_size), dtype=torch.float
)
idx = torch.as_tensor(sosy_idx[0], dtype=torch.int64)
target = target[0]  # scalar class index for this single example

device = torch.device("cpu")
model = graph_conv()
model.to(device)

optimizer = torch.optim.Adam(model.parameters())  # lr=0.001 by default

# Single forward/backward/update step.
optimizer.zero_grad()
node_embed, sosy_embed, sosy = model(edges, vertices, idx)
loss = F.cross_entropy(sosy, target)
loss.backward()
optimizer.step()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement