Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- import numpy as np
- from keras.utils import to_categorical
- from model import create_model
- import matplotlib.pyplot as plt
# RGB triples used to paint each of the 15 predicted clusters in the
# scatter plot (keys 0..14 match the class indices produced by argmax).
colors = dict(enumerate([
    (0.11, 0.41, 0.00),
    (0.33, 0.44, 0.55),
    (0.66, 0.77, 0.88),
    (0.99, 0.3, 0.2),
    (0.65, 0.00, 0.00),
    (0.00, 0.00, 0.3),
    (0.52, 1.0, 0.1),
    (0.0, 0.0, 1.0),
    (0.54, 0.78, 0.1),
    (0.54, 0.12, 0.1),
    (0.0, 0.24, 0.87),
    (0.87, 0.0, 0.87),
    (0.24, 0.1, 0.71),
    (0.9, 0.12, 0.71),
    (0.94, 0.1, 0.37),
]))
def main():
    """Train the model on the cluster file, predict a cluster for every
    point, and show a scatter plot colored by predicted cluster."""
    (x_train, y_train, x_test, y_test,
     x_val, y_val, all_points, all_points_normal) = load_data("clusters_file.txt")
    model = create_model(x_train, x_test, y_test, y_train, x_val, y_val)
    labels = create_clusters(model.predict(all_points))
    # Plot the raw (un-normalized) coordinates, one point at a time,
    # using the palette entry for that point's predicted cluster.
    for idx, (px, py) in enumerate(all_points_normal):
        plt.scatter(int(px), int(py), color=colors[int(labels[idx])], marker='o')
    plt.show()
def create_clusters(predictions):
    """Collapse per-point prediction vectors into hard cluster labels.

    Parameters
    ----------
    predictions : iterable of array-like
        One probability/score vector per point (e.g. softmax output).

    Returns
    -------
    list
        Index of the maximum entry of each vector, in input order.
    """
    # Comprehension replaces the original append loop; behavior unchanged.
    return [np.argmax(scores) for scores in predictions]
def load_data(filename):
    """Load "x,y,label" points, shuffle, split, one-hot encode, normalize.

    Parameters
    ----------
    filename : str
        Path to a text file with one point per line: ``x,y,cluster_label``
        (comma-separated floats; label assumed in 0..14 — TODO confirm).

    Returns
    -------
    tuple
        ``(x_train, y_train, x_test, y_test, x_val, y_val, all_points,
        all_points_normal)`` — labels one-hot over 15 classes,
        ``all_points`` normalized, ``all_points_normal`` the raw coords.
    """
    # Parse the whole file in one pass (was: readlines + append loop).
    with open(filename, 'r') as file:
        data = np.asarray([list(map(float, line.split(','))) for line in file])
    np.random.shuffle(data)
    length = data.shape[0]

    # Split: first 30% train, next 10% test, remaining 60% validation.
    train = data[:int(length * 0.3)]
    test = data[int(length * 0.3):int(length * 0.4)]
    val = data[int(length * 0.4):]
    x_train, y_train = train[:, 0:2], train[:, 2]
    x_val, y_val = val[:, 0:2], val[:, 2]
    x_test, y_test = test[:, 0:2], test[:, 2]
    # Snapshot the raw coordinates BEFORE the in-place normalization below
    # (concatenate copies, so this array is unaffected by the /=).
    all_points_normal = np.concatenate((x_test, x_train, x_val), axis=0)

    y_train = to_categorical(y_train, num_classes=15)
    y_test = to_categorical(y_test, num_classes=15)
    y_val = to_categorical(y_val, num_classes=15)

    # Scale coordinates into a small range for training stability.
    normalization_parameter = 1000000
    x_train /= normalization_parameter
    x_val /= normalization_parameter
    x_test /= normalization_parameter
    all_points = np.concatenate((x_test, x_train, x_val), axis=0)
    return x_train, y_train, x_test, y_test, x_val, y_val, all_points, all_points_normal
def load_model():
    """Placeholder for restoring a previously trained model; not implemented."""
    return None
# Run the full train/predict/plot pipeline only when executed as a script.
if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement