Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import json
- import tensorflow as tf
- import numpy as np
- import matplotlib.pyplot as plt
def json_data():
    """Load the raw combat-log records from ``assdata13.json``.

    Returns:
        The decoded JSON payload — expected to be a list of records,
        each carrying an ``_source`` dict (see ``build_features``).
    """
    # Explicit encoding so the read does not depend on the platform default.
    with open('assdata13.json', encoding='utf-8') as f:
        return json.load(f)
# Per-record stat sections and their keys, in exact column order.
# Column 0 is a reserved slot that is explicitly zeroed.
_FEATURE_SPEC = [
    ('userBS', ['level', 'str', 'dex', 'int', 'luk', 'maxHP', 'maxMP']),    # cols 1-7
    ('userFS', ['str', 'dex', 'int', 'luk', 'pAD', 'pDD', 'mAD', 'mDD',
                'acc', 'eva', 'speed', 'jump']),                            # cols 8-19
    ('userSS', ['pAD', 'pDD', 'mAD', 'mDD', 'acc', 'eva', 'speed', 'jump',
                'defenseAtt', 'defenseState']),                             # cols 20-29
]

# cols 37-44: enemy stat keys, in column order.
_ENEMY_KEYS = ['level', 'pAD', 'pDD', 'mAD', 'mDD', 'acc', 'eva', 'speed']

# Scale for the PRNG features: outputs are reduced mod 1e7 then multiplied
# by this constant (~1/9999999), mapping them into roughly [0, 1).
_RAND_SCALE = 0.000000100000010000001


def build_features(json_data):
    """Flatten raw combat-log records into feature/target matrices.

    Args:
        json_data: iterable of records, each a dict with an ``_source``
            entry holding ``userBS``/``userFS``/``userSS`` stat dicts,
            a 7-element ``rand32Output`` list, an ``enemyStat`` dict,
            and a scalar ``damage``.

    Returns:
        Tuple ``(Xs, Ys)``: ``Xs`` is an (n, 45) float matrix of
        features, ``Ys`` an (n, 1) matrix of damage targets.
    """
    n = len(json_data)
    print("dataset size " + str(n))
    Xs = np.zeros((n, 45))
    Ys = np.zeros((n, 1))
    for row, record in enumerate(json_data):
        log = record['_source']
        Xs[row][0] = 0  # reserved slot, always zero
        # cols 1-29: character stat sections in declared order.
        col = 1
        for section, keys in _FEATURE_SPEC:
            stats = log[section]
            for key in keys:
                Xs[row][col] = stats[key]
                col += 1
        # cols 30-36: seven PRNG outputs, reduced mod 1e7 and scaled ~[0, 1).
        for i in range(7):
            Xs[row][30 + i] = float(log['rand32Output'][i] % 10000000 * _RAND_SCALE)
        # cols 37-44: enemy stats.
        for offset, key in enumerate(_ENEMY_KEYS):
            Xs[row][37 + offset] = log['enemyStat'][key]
        Ys[row][0] = log['damage']
    assert Xs.shape[0] == Ys.shape[0]
    return Xs, Ys
def _plot_predictions(fig_no, y_true, y_pred, ylabel):
    """Scatter true vs. predicted values on a square, origin-anchored plot."""
    plt.figure(fig_no)
    plt.scatter(y_true, y_pred)
    plt.xlabel('True Values')
    plt.ylabel(ylabel)
    plt.axis('equal')
    plt.axis('square')
    plt.xlim([0, plt.xlim()[1]])
    plt.ylim([0, plt.ylim()[1]])
    # Identity line: a perfect predictor would land every point on it.
    _ = plt.plot([-500, 500], [-500, 500])
    plt.show()


def _train_and_eval(model, Xs, Ys, split, batch_size, patience):
    """Compile, fit with early stopping, and evaluate on the holdout tail.

    Returns the flattened holdout predictions.
    """
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['mse', 'mae', 'mape'])
    # NOTE(review): EarlyStopping monitors *training* mse — no validation
    # data is supplied to fit(), so this stops on training-loss plateaus.
    model.fit(Xs[:split], Ys[:split], epochs=28000, batch_size=batch_size,
              callbacks=[tf.keras.callbacks.EarlyStopping(monitor='mse',
                                                          patience=patience,
                                                          restore_best_weights=True)])
    model.evaluate(Xs[split:], Ys[split:])
    return model.predict(Xs[split:]).flatten()


def run_model(Xs, Ys):
    """Train and compare two damage-regression networks on (Xs, Ys).

    Shuffles the data once, trains a 2-hidden-layer net and a
    1-hidden-layer net on the same train/test partition, and shows a
    true-vs-predicted scatter plot for each.
    """
    # Shuffle once so both models share the same train/test partition.
    p = np.random.permutation(len(Xs))
    Xs = Xs[p]
    Ys = Ys[p]
    # NOTE(review): hard-coded split assumes the dataset has well over
    # 800 rows; with fewer, the holdout slice is empty — confirm upstream.
    split = 800

    # Model 1: two hidden layers, light L2 regularization.
    model1 = tf.keras.models.Sequential([
        tf.keras.layers.Dense(45, activation='relu',
                              kernel_regularizer=tf.keras.regularizers.l2(0.000233)),
        tf.keras.layers.Dense(30, activation='relu',
                              kernel_regularizer=tf.keras.regularizers.l2(0.000233)),
        tf.keras.layers.Dense(1)
    ])
    preds1 = _train_and_eval(model1, Xs, Ys, split, batch_size=16, patience=800)
    _plot_predictions(1, Ys[split:], preds1, 'Predictions, NN 1 Test')

    # Model 2: single hidden layer, larger batch, shorter patience.
    model2 = tf.keras.models.Sequential([
        tf.keras.layers.Dense(45, activation='relu',
                              kernel_regularizer=tf.keras.regularizers.l2(0.000233)),
        tf.keras.layers.Dense(1)
    ])
    preds2 = _train_and_eval(model2, Xs, Ys, split, batch_size=32, patience=400)
    _plot_predictions(2, Ys[split:], preds2, 'Predictions, NN 2 Test')
if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. to reuse
    # build_features) does not trigger data loading and training.
    X, Y = build_features(json_data())
    run_model(X, Y)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement