max2201111
vladimir CNN last OK very good
Jun 23rd, 2024
Python | 13.58 KB | Science
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm.notebook import tqdm_notebook
from IPython.display import display, Javascript
from google.colab import files
import os
import shutil
import ast
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
import seaborn as sns
from skimage.transform import resize
import sys

# Prevent Colab from collapsing long cell output into a scrollable area
display(Javascript('IPython.OutputArea.auto_scroll_threshold = 9999;'))

# RGB colours for the label pixel: 0 = green, 1 = red
label_colors = {0: [0, 128, 0], 1: [255, 0, 0]}
label_colors_testing = {0: [0, 128, 0], 1: [255, 0, 0]}

%matplotlib inline

def create_image(data, predictions, label_colors):
    num_rows, num_columns = len(data), len(data[0])
    image = np.zeros((num_rows, num_columns + 1, 3), dtype=np.uint8)
    min_val = np.min(data)
    max_val = np.max(data)
    for i in range(num_rows):
        for j in range(num_columns):
            pixel_value = int(np.interp(data[i][j], [min_val, max_val], [0, 255]))
            image[i, j] = np.array([pixel_value] * 3)
        image[i, -1] = label_colors[predictions[i]]
    return image

def create_imageN(data, predictions, label_colors):
    num_training_rows = len(data)
    num_columns = len(data[0])
    image_training = np.zeros((num_training_rows, num_columns + 1, 3), dtype=np.uint8)
    for i in range(num_training_rows):
        for j in range(num_columns):
            pixel_value = int(np.interp(data[i][j], [-3, 3], [0, 255]))
            image_training[i, j] = np.array([pixel_value] * 3)
        image_training[i, -1] = label_colors[int(predictions[i])]
    return image_training
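
# How the row-image encoding works: both helpers turn each sample into one image row of
# num_columns grayscale pixels plus a final coloured pixel for its label/prediction
# (green = 0, red = 1 from label_colors). create_image scales pixels to the data's own
# min/max, while create_imageN assumes z-scored data and maps the fixed range [-3, 3]
# to [0, 255]. Minimal sketch with made-up values (kept commented out):
# demo = create_imageN(np.array([[0.0, 1.0, -1.0, 2.0]]), [1], label_colors)
# demo.shape  # -> (1, 5, 3): 4 grayscale feature pixels + 1 red label pixel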

def create_cnn_model(input_shape):
    model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=input_shape),
        tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
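
# Architecture note: create_cnn_model builds a small binary classifier for 100x100 RGB
# inputs: one 3x3 Conv2D layer with 32 filters, 2x2 max pooling, a flatten, a 64-unit
# dense layer, and a single sigmoid output, compiled with Adam and binary cross-entropy.
# Quick way to inspect it (commented out):
# create_cnn_model((100, 100, 3)).summary()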

new_persons_results = [
    [0.030391238492519845, 0.23021081913032299, 0.4743575198860915, 0.639395348276238],
    [0.19790381537769108, 0.37639843860181527, 0.5676528538456297, 0.716530820399044],
    [0.0035245462826666075, 0.23127629815305784, 0.4802171123709532, 0.6591272725083992],
    [0.059230621364548486, 0.24424510845680134, 0.442553808602372, 0.6891856336835676],
    [0.05536813173866345, 0.2538888869331579, 0.47861285542743165, 0.6200559751500355],
    [0.1300359168058454, 0.38443677757577344, 0.5957238735056223, 0.795823160451845],
    [0.1743368240338569, 0.3713129035302336, 0.5640350202165867, 0.7213527928848786],
    [0.09173335232875372, 0.2559096689549753, 0.49527436563146954, 0.6970388573439903],
    [0.015235204378572087, 0.2284904031445293, 0.46613902406934005, 0.6917336579549159],
    [0.0011416656054787145, 0.24567669307188245, 0.4388400949432476, 0.667323193441009],
    [0.11776711, 0.17521301, 0.5074825, 0.8509191],
    [0.12314088, 0.27565651, 0.52214202, 0.77386896],
]
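
# new_persons_results holds 12 hand-entered 4-feature vectors on the original
# (unnormalized) scale; after training, each is normalized with the training mean/std
# and scored by both models as a "new person".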

uploaded = files.upload()
for filename in uploaded.keys():
    original_path = f"/content/{filename}"
    destination_path = "/content/DATA2"
    shutil.move(original_path, destination_path)
    print(f"File {filename} was moved to {destination_path}")

file_path = '/content/DATA2'
with open(file_path, 'r') as file:
    code = file.read()

A_list = ast.literal_eval(code)
A = np.array(A_list)

labels = [results[-1] for results in A]
data = [results[:-1] for results in A]

num_training_rows = 50
num_testing_rows = 50
# First num_training_rows rows train the models; the next num_testing_rows rows are held out for testing
X_train, X_test, y_train, y_test = data[:num_training_rows], data[num_training_rows:num_training_rows+num_testing_rows], labels[:num_training_rows], labels[num_training_rows:num_training_rows+num_testing_rows]

mean_values = np.mean(X_train, axis=0)
std_values = np.std(X_train, axis=0)
X_train_normalized = (X_train - mean_values) / std_values
X_test_normalized = (X_test - mean_values) / std_values

# Verify normalization
print("Mean of X_train_normalized (should be close to 0):", np.mean(X_train_normalized, axis=0))
print("Std of X_train_normalized (should be close to 1):", np.std(X_train_normalized, axis=0))
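
# Sketch of an equivalent z-score normalization with scikit-learn's StandardScaler
# (not used below, shown only as an alternative to the manual mean/std code above):
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler().fit(X_train)
# X_train_normalized = scaler.transform(X_train)
# X_test_normalized = scaler.transform(X_test)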

# Generate row-images from the normalized data for the CNN.
# The labels passed here only set the colour of the last (label) pixel, which is used
# for the visualisations below; it is removed again before the CNN sees the data.
image_training = create_imageN(X_train_normalized, y_train, label_colors)
image_testing = create_imageN(X_test_normalized, y_test, label_colors_testing)

# Resize each row-image to a fixed size for the CNN input.
# img[:-1, :] drops the coloured label pixel (the last column) so the label cannot
# leak into the CNN features; all three RGB channels are kept.
image_training_resized = [resize(img[:-1, :], (100, 100, 3)) for img in image_training]
image_testing_resized = [resize(img[:-1, :], (100, 100, 3)) for img in image_testing]

# Stack the resized images into CNN input tensors
X_train_cnn = np.array(image_training_resized)
X_test_cnn = np.array(image_testing_resized)

# DNN model: a small fully connected network on the 4 normalized features
dnn_model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation='relu', input_shape=(len(X_train[0]),)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
dnn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
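
# Note: the DNN consumes the 4 normalized feature values directly (4 -> 128 -> 64 -> 1
# with a sigmoid output), while the CNN below consumes the 100x100 RGB row-images, so
# the two models see the same samples in two different representations.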

# Train the DNN model one epoch at a time so per-epoch accuracy can be recorded
dnn_accuracy_history = []
epochs = 32

for epoch in tqdm_notebook(range(epochs)):
    history_dnn = dnn_model.fit(X_train_normalized, np.array(y_train), epochs=1, verbose=0, shuffle=False)
    dnn_accuracy_history.append(history_dnn.history['accuracy'][0])

    if epoch == 1:
        # Snapshot of the test predictions after the 2nd epoch
        y_pred_after_2nd_epoch_dnn = dnn_model.predict(X_test_normalized)
        y_pred_binary_after_2nd_epoch_dnn = [1 if pred >= 0.5 else 0 for pred in y_pred_after_2nd_epoch_dnn]
        image_testing_before_2nd_epoch_dnn = create_image(X_test_normalized, y_pred_binary_after_2nd_epoch_dnn, label_colors_testing)

    if epoch >= epochs - 1:
        print(f"Final epoch reached: {epoch + 1}/{epochs}\n")
        sys.stdout.flush()

        # Score each new person with the trained DNN
        for idx, personNEW_results in enumerate(new_persons_results, start=1):
            assert len(personNEW_results) == len(X_train[0]), "Mismatch in the number of features."
            personNEW_results_normalized = (np.array(personNEW_results) - mean_values) / std_values
            personNEW_prediction_dnn = dnn_model.predict(np.array([personNEW_results_normalized]))
            personNEW_label_dnn = 1 if personNEW_prediction_dnn >= 0.5 else 0
            y_pred_after_50_epochs_dnn = dnn_model.predict(X_test_normalized)
            y_pred_binary_after_50_epochs_dnn = [1 if pred >= 0.5 else 0 for pred in y_pred_after_50_epochs_dnn]
            image_testing_after_50_epochs_dnn = create_image(X_test_normalized, y_pred_binary_after_50_epochs_dnn, label_colors_testing)
            image_personNEW_dnn = create_imageN([personNEW_results_normalized], [personNEW_label_dnn], label_colors)
            plt.figure(figsize=(5, 5))
            plt.imshow(image_personNEW_dnn)
            plt.title(f"New Person {idx} - DNN\nLabel: {personNEW_label_dnn}, Prediction: {personNEW_prediction_dnn}")
            plt.axis("off")
            plt.show()

# CNN model operating on the 100x100 RGB row-images
cnn_model = create_cnn_model((100, 100, 3))

# Train the CNN model one epoch at a time, mirroring the DNN loop above
cnn_accuracy_history = []

for epoch in tqdm_notebook(range(epochs)):
    history_cnn = cnn_model.fit(X_train_cnn, np.array(y_train), epochs=1, verbose=0, shuffle=False)
    cnn_accuracy_history.append(history_cnn.history['accuracy'][0])

    if epoch == 1:
        # Snapshot of the test predictions after the 2nd epoch
        y_pred_after_2nd_epoch_cnn = cnn_model.predict(X_test_cnn)
        y_pred_binary_after_2nd_epoch_cnn = [1 if pred >= 0.5 else 0 for pred in y_pred_after_2nd_epoch_cnn]
        image_testing_before_2nd_epoch_cnn = create_image(X_test_normalized, y_pred_binary_after_2nd_epoch_cnn, label_colors_testing)

    if epoch >= epochs - 1:
        print(f"Final epoch reached: {epoch + 1}/{epochs}\n")
        sys.stdout.flush()

        # Score each new person with the trained CNN
        for idx, personNEW_results in enumerate(new_persons_results, start=1):
            assert len(personNEW_results) == len(X_train[0]), "Mismatch in the number of features."
            personNEW_results_normalized = (np.array(personNEW_results) - mean_values) / std_values
            image_personNEW = create_imageN([personNEW_results_normalized], [0], label_colors)
            # Drop the label pixel and keep the same (4, 3) layout used for the training images
            image_personNEW_resized = resize(image_personNEW[0, :-1, :], (100, 100, 3))
            personNEW_prediction_cnn = cnn_model.predict(np.array([image_personNEW_resized]))
            personNEW_label_cnn = 1 if personNEW_prediction_cnn >= 0.5 else 0
            y_pred_after_50_epochs_cnn = cnn_model.predict(X_test_cnn)
            y_pred_binary_after_50_epochs_cnn = [1 if pred >= 0.5 else 0 for pred in y_pred_after_50_epochs_cnn]
            image_testing_after_50_epochs_cnn = create_image(X_test_normalized, y_pred_binary_after_50_epochs_cnn, label_colors_testing)
            image_personNEW_cnn = create_imageN([personNEW_results_normalized], [personNEW_label_cnn], label_colors)
            plt.figure(figsize=(5, 5))
            plt.imshow(image_personNEW_cnn)
            plt.title(f"New Person {idx} - CNN\nLabel: {personNEW_label_cnn}, Prediction: {personNEW_prediction_cnn}")
            plt.axis("off")
            plt.show()

# Display the images
plt.figure(figsize=(25, 15))
plt.subplot(2, 2, 1)
plt.imshow(image_training)
plt.title("Training Data")
plt.axis("off")

plt.subplot(2, 2, 2)
plt.imshow(image_testing_before_2nd_epoch_dnn)
plt.title("Testing Data (2nd Epoch) - DNN")
plt.axis("off")

plt.subplot(2, 2, 3)
plt.imshow(image_testing_after_50_epochs_dnn)
plt.title(f"Testing Data ({epochs} Epochs) - DNN")
plt.axis("off")

plt.subplot(2, 2, 4)
plt.imshow(image_personNEW_dnn)
plt.title(f"New Person - DNN\nLabel: {personNEW_label_dnn},[{personNEW_prediction_dnn}]")
plt.axis("off")

plt.figure(figsize=(12, 5))
plt.plot(range(1, epochs + 1), dnn_accuracy_history, marker='o')
plt.title('DNN Accuracy Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.grid()

plt.figure(figsize=(25, 15))
plt.subplot(2, 2, 1)
plt.imshow(image_training)
plt.title("Training Data")
plt.axis("off")

plt.subplot(2, 2, 2)
plt.imshow(image_testing_before_2nd_epoch_cnn)
plt.title("Testing Data (2nd Epoch) - CNN")
plt.axis("off")

plt.subplot(2, 2, 3)
plt.imshow(image_testing_after_50_epochs_cnn)
plt.title(f"Testing Data ({epochs} Epochs) - CNN")
plt.axis("off")

plt.subplot(2, 2, 4)
plt.imshow(image_personNEW_cnn)
plt.title(f"New Person - CNN\nLabel: {personNEW_label_cnn},[{personNEW_prediction_cnn}]")
plt.axis("off")

plt.figure(figsize=(12, 5))
plt.plot(range(1, epochs + 1), cnn_accuracy_history, marker='o')
plt.title('CNN Accuracy Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.grid()
  252.  
  253. # Confusion Matrix and Performance Metrics for DNN
  254. dnn_predictions = (dnn_model.predict(X_test_normalized) > 0.5).astype(int)
  255. dnn_conf_matrix = confusion_matrix(y_test, dnn_predictions)
  256. print(f"Confusion Matrix for DNN:\n{dnn_conf_matrix}")
  257. dnn_accuracy = accuracy_score(y_test, dnn_predictions)
  258. dnn_precision = precision_score(y_test, dnn_predictions)
  259. dnn_recall = recall_score(y_test, dnn_predictions)
  260. dnn_f1 = f1_score(y_test, dnn_predictions)
  261. print(f"DNN Accuracy: {dnn_accuracy:.4f}")
  262. print(f"DNN Precision: {dnn_precision:.4f}")
  263. print(f"DNN Recall: {dnn_recall:.4f}")
  264. print(f"DNN F1 Score: {dnn_f1:.4f}")
  265.  
  266. # Confusion Matrix and Performance Metrics for CNN
  267. cnn_predictions = (cnn_model.predict(X_test_cnn) > 0.5).astype(int)
  268. cnn_conf_matrix = confusion_matrix(y_test, cnn_predictions)
  269. print(f"Confusion Matrix for CNN:\n{cnn_conf_matrix}")
  270. cnn_accuracy = accuracy_score(y_test, cnn_predictions)
  271. cnn_precision = precision_score(y_test, cnn_predictions)
  272. cnn_recall = recall_score(y_test, cnn_predictions)
  273. cnn_f1 = f1_score(y_test, cnn_predictions)
  274. print(f"CNN Accuracy: {cnn_accuracy:.4f}")
  275. print(f"CNN Precision: {cnn_precision:.4f}")
  276. print(f"CNN Recall: {cnn_recall:.4f}")
  277. print(f"CNN F1 Score: {cnn_f1:.4f}")
  278.  
  279. # Display confusion matrices
  280. plt.figure(figsize=(12, 5))
  281.  
  282. plt.subplot(1, 2, 1)
  283. sns.heatmap(dnn_conf_matrix, annot=True, fmt='d', cmap='Blues')
  284. plt.xlabel('Predicted')
  285. plt.ylabel('Actual')
  286. plt.title('DNN Confusion Matrix')
  287.  
  288. plt.subplot(1, 2, 2)
  289. sns.heatmap(cnn_conf_matrix, annot=True, fmt='d', cmap='Blues')
  290. plt.xlabel('Predicted')
  291. plt.ylabel('Actual')
  292. plt.title('CNN Confusion Matrix')
  293.  
  294. plt.show()
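
# Sketch: scikit-learn's classification_report collects precision/recall/F1 in one call
# (not used above, shown as a compact alternative to the individual metric calls):
# from sklearn.metrics import classification_report
# print(classification_report(y_test, dnn_predictions, digits=4))
# print(classification_report(y_test, cnn_predictions, digits=4))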

# Optimize a new input vector so that the DNN prediction is as close as possible to 0.52
target_prediction = 0.52
input_shape = 4
new_vector = np.random.randn(input_shape)
new_vector = tf.Variable(new_vector, dtype=tf.float32)

optimizer = tf.optimizers.Adam(learning_rate=0.1)

def loss_function():
    prediction = dnn_model(tf.reshape(new_vector, (1, -1)))
    return tf.abs(prediction - target_prediction)
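
# The loss is |f(x) - 0.52|, so the gradient descent below adjusts the input vector x
# (in normalized feature space) while the trained DNN weights stay fixed.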

# Gradient descent on the input vector
for _ in range(1000):
    optimizer.minimize(loss_function, [new_vector])

# Denormalize the resulting vector back to the original feature scale
result_vector = new_vector.numpy()
denormalized_vector = result_vector * std_values + mean_values
result_prediction = dnn_model.predict(result_vector.reshape(1, -1))

print("Resulting vector (normalized):", result_vector)
print("Resulting vector (denormalized):", denormalized_vector)
print("Prediction for the resulting vector:", result_prediction)
