Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- from tensorflow.keras.models import Sequential
- from tensorflow.keras.layers import Dense, Dropout, LSTM#, CuDNNLSTM
- import numpy as np
- import pandas as pd
- import keras
- import json
- import random
- from datetime import datetime
# --- Run configuration --------------------------------------------------
# Timestamped TensorBoard log directory (Windows-style path, matching the
# data paths used further below).
logdir = "logs\\scalars\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

match_duration = 1200     # timesteps recorded per match
seq_length = 240          # timesteps per training sequence
matches_per_player = 100  # matches available for each player

# Train/validation file lists and their labels, produced by a previous
# preprocessing step.  Use context managers so the descriptors are closed:
# json.load(open(...)) leaks the open file handle.
with open("partition.json") as fp:
    partition = json.load(fp)
with open("labels.json") as fp:
    labels = json.load(fp)
- #index,cursor_X_mean,cursor_X_std,cursor_X_changes,cursor_Y_mean,cursor_Y_std,cursor_Y_changes,
- #heroCell_X_mean,heroCell_X_std,heroCell_X_changes,heroCell_Y_mean,heroCell_Y_std,heroCell_Y_changes,
- #heroVec_X_mean,heroVec_X_std,heroVec_X_changes,heroVec_Y_mean,heroVec_Y_std,heroVec_Y_changes,
- #cameraCell_X_mean,cameraCell_X_std,cameraCell_X_changes,cameraCell_Y_mean,cameraCell_Y_std,cameraCell_Y_changes,
- #cameraVec_X_mean,cameraVec_X_std,cameraVec_X_changes,cameraVec_Y_mean,cameraVec_Y_std,cameraVec_Y_changes,cameraVec_Z_mean,cameraVec_Z_std,cameraVec_Z_changes,
- #n_ping,ping_X_mean,ping_X_std,ping_Y_mean,ping_Y_std,
- #n_act1,act1_X_mean,act1_X_std,act1_Y_mean,act1_Y_std,n_act2,n_act3,act3_X_mean,act3_X_std,act3_Y_mean,act3_Y_std,n_act4,n_act5,act5_X_mean,act5_X_std,act5_Y_mean,act5_Y_std,
- #n_act6,n_act7,n_act8,n_act10,n_act12,act12_X_mean,act12_X_std,act12_Y_mean,act12_Y_std,n_act27,n_act33
# Players (ids embedded in the csv file names) used for classification.
id_list = ["35481334", "43181484", "40030765", "72856820", "82550973",
           "93725467", "96626270"]

# Per-timestep features fed to the network: cursor, camera and action
# statistics (see the csv column list above).
features = ["cursor_X_mean","cursor_X_std","cursor_X_changes","cursor_Y_mean","cursor_Y_std","cursor_Y_changes","n_act1","act1_X_mean","act1_X_std","act1_Y_mean","act1_Y_std",
            "cameraCell_X_mean","cameraCell_X_std","cameraCell_X_changes","cameraCell_Y_mean","cameraCell_Y_std","cameraCell_Y_changes",
            "cameraVec_X_mean","cameraVec_X_std","cameraVec_X_changes","cameraVec_Y_mean","cameraVec_Y_std","cameraVec_Y_changes","cameraVec_Z_mean","cameraVec_Z_std","cameraVec_Z_changes",
            "n_act2","n_act3","n_act4","n_act5","n_act6","n_act7","n_act8","n_act10","n_act12","n_act27","n_act33"]
# NOTE(review): earlier experiments used alternative feature subsets
# (hero position only, camera only, act1 only, action counts only).

# Map each player id to a dense class index 0..len(id_list)-1.
labels_id_list = {player_id: idx for idx, player_id in enumerate(id_list)}
print(labels_id_list)
# Pre-allocate fixed-size arrays for the 80/20 train/validation split.
# Each match of `match_duration` timesteps yields
# match_duration / seq_length non-overlapping sequences.
total_sequences = (match_duration / seq_length) * len(id_list) * matches_per_player
n_training_samples = int(total_sequences * 0.8)
n_test_samples = int(total_sequences * 0.2)

n_features = len(features)
x_train = np.zeros((n_training_samples, seq_length, n_features))
x_test = np.zeros((n_test_samples, seq_length, n_features))
y_train = np.zeros(n_training_samples)
y_test = np.zeros(n_test_samples)

i = 0                 # write cursor into x_train / y_train
list_to_shuffle = []  # match files belonging to the selected players
# ---- Build the training set --------------------------------------------
# Keep only matches of the selected players; file names end in
# "..._<player_id>.csv", so the id is the last "_"-separated token minus
# the ".csv" suffix.
list_to_shuffle = [f for f in partition["train"]
                   if f.split("_")[-1][:-4] in id_list]
random.shuffle(list_to_shuffle)

seq_per_match = int(match_duration / seq_length)
for f in list_to_shuffle:
    df = pd.read_csv("D:\\provaRNN\\normalized_csv\\" + f)
    # The label is constant for every sequence of a match, so look it up
    # once here instead of once per inner-loop iteration.
    label = labels_id_list[f.split("_")[-1][:-4]]
    for k in range(seq_per_match):
        # k-th non-overlapping window of seq_length timesteps.
        X = np.array(df[features][k * seq_length:(k + 1) * seq_length])
        x_train[i, ] = X.reshape(seq_length, len(features))
        y_train[i] = label
        i += 1
# ---- Build the validation/test set -------------------------------------
# Same procedure as the training set, driven by partition["validation"].
i = 0
list_to_shuffle = [f for f in partition["validation"]
                   if f.split("_")[-1][:-4] in id_list]
random.shuffle(list_to_shuffle)

for f in list_to_shuffle:
    df = pd.read_csv("D:\\provaRNN\\normalized_csv\\" + f)
    # Constant per match: hoisted out of the per-sequence loop.
    label = labels_id_list[f.split("_")[-1][:-4]]
    for k in range(seq_per_match):
        X = np.array(df[features][k * seq_length:(k + 1) * seq_length])
        x_test[i, ] = X.reshape(seq_length, len(features))
        y_test[i] = label
        i += 1

# One-hot encode the integer class labels for categorical_crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes=len(id_list))
y_test = keras.utils.to_categorical(y_test, num_classes=len(id_list))
- #print(len(x_train))
- #print(x_train)
# ---- Model -------------------------------------------------------------
# Two stacked LSTMs over (seq_length, n_features) windows, one dense
# hidden layer, and a softmax over the candidate players.
# (Dropout(0.2) between layers was tried and left disabled.)
model = Sequential()
# If running on a GPU, CuDNNLSTM is a faster drop-in replacement
# (tanh activation is implied there; don't pass an activation).
model.add(LSTM(256, input_shape=x_train.shape[1:], return_sequences=True))
model.add(LSTM(256))
model.add(Dense(64, activation='relu'))
model.add(Dense(len(id_list), activation='softmax'))

# `lr` and `decay` are deprecated Adam kwargs in TF2 (removed in recent
# releases).  An InverseTimeDecay schedule reproduces the legacy
# behaviour lr / (1 + decay * iterations).
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.0001,
    decay_steps=1,
    decay_rate=1e-6,
)
opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

# Compile model
model.compile(
    loss='categorical_crossentropy',
    optimizer=opt,
    metrics=['accuracy'],
)
print(model.summary())

# Train for 150 epochs, evaluating on the held-out set each epoch;
# TensorBoard scalars go to `logdir`.
training_history = model.fit(
    x_train,
    y_train,
    epochs=150,
    shuffle=True,
    validation_data=(x_test, y_test),
    callbacks=[tensorboard_callback],
)

# BUG FIX: the messages say "test" but the original read the TRAINING
# curves ('loss'/'accuracy').  The held-out metrics live under the
# 'val_' keys of the History object.
print("Average test loss: ", np.average(training_history.history['val_loss']))
print("Average test accuracy: ", np.average(training_history.history['val_accuracy']))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement