Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #Workaround for tensors:
- import numpy as np
- import tensorflow as tf
- aiOutPossible = np.array([[.2,.2,.1], [.35, .2, .1,]])
- aiOutPossible = tf.convert_to_tensor(aiOutPossible)
- aiOutPossible = aiOutPossible / aiOutPossible.sum()
- >>> AttributeError: 'tensorflow.python.framework.ops.EagerTensor' object has no attribute 'sum'
- c = tf.reduce_sum(aiOutPossible, axis=1)
- aiOutPossibleNorm = aiOutPossible / tf.reshape(c, (-1, 1))
- print(aiOutPossibleNorm)
- >>> <tf.Tensor: shape=(2, 3), dtype=float64, numpy=
- array([[0.4 , 0.4 , 0.2 ],
- [0.53846154, 0.30769231, 0.15384615]])>
- #Assuming this is correct, here is the new loss function:
def customLoss(dataOut, aiOut):
    """Categorical cross-entropy computed over legal moves only.

    dataOut packs two (batch, 4096) tensors side by side along axis 1:
      - columns 0:4096    -> the target move distribution (presumably a
        one-hot of the move actually played — TODO confirm against the
        data pipeline)
      - columns 4096:8192 -> 0/1 mask of the moves that were legal

    aiOut is the network's (batch, 4096) output. The prediction is
    masked to legal moves and renormalized so each row sums to 1, then
    scored with categorical cross-entropy against the target.

    Returns a per-sample loss tensor of shape (batch,).
    """
    actualOut = dataOut[:, 0:4096]
    possibleMoves = dataOut[:, 4096:8192]
    # Zero out probability mass the net assigned to illegal moves.
    aiOutPossible = possibleMoves * aiOut
    # keepdims=True keeps the sum as (batch, 1) so the division
    # broadcasts directly, replacing the manual tf.reshape(c, (-1, 1)).
    c = tf.reduce_sum(aiOutPossible, axis=1, keepdims=True)
    # Epsilon guards against 0/0 -> NaN when a masked row sums to zero
    # (e.g. the net put all of its mass on illegal moves). A single NaN
    # loss poisons the gradients for the whole batch.
    aiOutPossibleNorm = aiOutPossible / (c + tf.keras.backend.epsilon())
    loss = tf.keras.backend.categorical_crossentropy(actualOut, aiOutPossibleNorm)
    return loss
- #And here is the output:
- 225/225 - 28s - loss: 3.1917 - accuracy: 7.2618e-04 - val_loss: 2.9393 - val_accuracy: 0.0013
- 222/222 - 22s - loss: 2.8795 - accuracy: 0.0018 - val_loss: 2.8688 - val_accuracy: 0.0033
- 201/201 - 20s - loss: 2.8157 - accuracy: 0.0037 - val_loss: 2.8088 - val_accuracy: 0.0047
- 221/221 - 21s - loss: 2.7629 - accuracy: 0.0047 - val_loss: 2.7512 - val_accuracy: 0.0082
- 222/222 - 24s - loss: 2.7161 - accuracy: 0.0088 - val_loss: 2.7203 - val_accuracy: 0.0124
- 221/221 - 23s - loss: 2.6670 - accuracy: 0.0119 - val_loss: 2.6841 - val_accuracy: 0.0127
- 214/214 - 21s - loss: 2.6615 - accuracy: 0.0130 - val_loss: 2.6551 - val_accuracy: 0.0135
- 193/193 - 19s - loss: 2.6199 - accuracy: 0.0123 - val_loss: 2.6307 - val_accuracy: 0.0134
- 217/217 - 21s - loss: 2.5873 - accuracy: 0.0142 - val_loss: 2.6186 - val_accuracy: 0.0151
- 227/227 - 23s - loss: 2.5883 - accuracy: 0.0145 - val_loss: 2.5926 - val_accuracy: 0.0137
- 202/202 - 21s - loss: 2.5732 - accuracy: 0.0136 - val_loss: 2.5800 - val_accuracy: 0.0159
- 217/217 - 21s - loss: 2.5440 - accuracy: 0.0165 - val_loss: 2.5638 - val_accuracy: 0.0165
- 224/224 - 21s - loss: 2.5426 - accuracy: 0.0161 - val_loss: 2.5498 - val_accuracy: 0.0170
- 221/221 - 28s - loss: 2.5150 - accuracy: 0.0164 - val_loss: 2.5286 - val_accuracy: 0.0166
- 211/211 - 26s - loss: 2.4972 - accuracy: 0.0163 - val_loss: 2.5367 - val_accuracy: 0.0169
- 221/221 - 24s - loss: 2.5312 - accuracy: 0.0153 - val_loss: 2.5261 - val_accuracy: 0.0170
- 218/218 - 22s - loss: 2.5103 - accuracy: 0.0161 - val_loss: 2.5146 - val_accuracy: 0.0163
- #Unfortunately, the accuracy is still incredibly low, and very far from the 70% target
- #-Ruler
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement