snakemelon

machine learning

May 13th, 2022 (edited)
162
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 6.62 KB | None | 0 0
  1. import json
  2.  
  3. import torch
  4.  
  5. from sklearn import preprocessing
  6. from sklearn.model_selection import train_test_split
  7.  
  8. from tensorflow import keras
  9. from tensorflow.keras.callbacks import EarlyStopping
  10. from tensorflow.keras.callbacks import ModelCheckpoint
  11. from tensorflow.keras.optimizers import Adam
  12. # from keras import losses
  13. from tensorflow.keras.models import Sequential
  14. from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
  15.  
  16. import numpy as np
  17.  
  18. import os
  19.  
  20. os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
  21. os.environ["CUDA_VISIBLE_DEVICES"] = "0"
  22.  
  23.  
# Path to the pre-extracted MFCC dataset consumed by load_data().
JSON_SET_PATH = "json_set/data.json"
# Genre class names; the order is fixed so the LabelEncoder in load_data()
# always maps each genre to the same integer index.
GENRE_LIST = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
  26.  
  27.  
  28. def load_data(json_set_path):
  29.     # X, y = [], []
  30.     with open(json_set_path, "r", encoding="utf-8") as json_data:
  31.         data = json.loads(json_data.read())
  32.  
  33.     Label = preprocessing.LabelEncoder()
  34.     Label.fit(GENRE_LIST)
  35.  
  36.     y = np.array(Label.transform(data["label"]), dtype=float)
  37.     X = data["mfcc"]
  38.  
  39.     # X = np.array(data["mfcc"], dtype=float)
  40.     # print(numpy.array(X).shape)
  41.     # print(len(X[]), len(X[0]))
  42.  
  43.     for i in data["mfcc"]:
  44.         for j in i:
  45.             if len(j) != 647:
  46.                 if len(j) > 647:
  47.                     del j[647:]
  48.                 else:
  49.                     for num in range(0, 647-len(j)):
  50.                         j.append(0)
  51.                     # print("index:", data["mfcc"].index(i), "after append:", len(j))
  52.             else:
  53.                 pass
  54.  
  55.     # print("X.shape", np.array(X).shape, "y.shape", np.array(y).shape)
  56.     # X.shape (1000, 20, 647) y.shape (1000,)
  57.     return np.array(X), np.array(y)
  58.  
  59.  
  60. def define_model(input_shape=(20, 647)):
  61.     model = Sequential()
  62.     model.add(Conv2D(input_shape=input_shape, filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
  63.     model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
  64.     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  65.     model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
  66.     model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
  67.     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  68.     model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
  69.     model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
  70.     model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
  71.     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  72.     model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
  73.     model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
  74.     model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
  75.     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  76.     model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
  77.     model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
  78.     model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
  79.     model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  80.  
  81.     model.add(Flatten())
  82.     model.add(Dense(units=4096, activation="relu"))
  83.     model.add(Dense(units=4096, activation="relu"))
  84.     model.add(Dense(units=2, activation="softmax"))
  85.  
  86.     # show the model information
  87.  
  88.     # model.summary()
  89.     return model
  90.  
  91.  
  92. def main():
  93.     # load the data
  94.     X, y = load_data(json_set_path=JSON_SET_PATH)
  95.  
  96.     # ndarray 转换为tensor张量
  97.     X_tensor = torch.from_numpy(X)
  98.     y_tensor = torch.from_numpy(y)
  99.     # split the data into the train adn test
  100.     X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor, test_size=0.25)
  101.     # X_train.shape torch.Size([750, 20, 647]) y_train_shape torch.Size([750])
  102.     # X_train type: <class 'torch.Tensor'> y_train type: <class 'torch.Tensor'>
  103.  
  104.     # load the model
  105.     model = define_model()
  106.     opt = Adam(learning_rate=0.001)
  107.  
  108.     model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
  109.     model.fit(x=X_train, y=y_train, batch_size=None, epochs=20, verbose=1, validation_data=(X_test, y_test))
  110.  
  111.     checkpoint = ModelCheckpoint("vgg16_1.h5",
  112.                                  monitor='val_acc',
  113.                                  verbose=1,
  114.                                  save_best_only=True,
  115.                                  save_weights_only=False,
  116.                                  mode='auto', period=1)
  117.     early = EarlyStopping(monitor='val_acc',
  118.                           min_delta=0,
  119.                           patience=20,
  120.                           verbose=1,
  121.                           mode='auto')
  122.     hist = model.fit_generator(steps_per_epoch=100,
  123.                                generator=X_train,
  124.                                validation_data=X_test,
  125.                                validation_steps=10,
  126.                                epochs=100,
  127.                                callbacks=[checkpoint, early])
  128.  
  129.  
  130. if __name__ == "__main__":
  131.     main()
  132.  
  133. ERROR_INFORMATION:
  134. 2022-05-13 19:56:53.435198: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX AVX2
  135. To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
  136. Traceback (most recent call last):
  137.   File "C:/Users/xxxx/Pycharm Projects/Graduation_Project/VGG 16.py", line 132, in <module>
  138.     main()
  139.   File "C:/Users/xxxx/Pycharm Projects/Graduation_Project/VGG 16.py", line 106, in main
  140.     model = define_model()
  141.   File "C:/Users/xxxx/Pycharm Projects/Graduation_Project/VGG 16.py", line 62, in define_model
  142.     model.add(Conv2D(input_shape=input_shape, filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
  143.   File "C:\Users\xxxx\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\training\tracking\base.py", line 629, in _method_wrapper
  144.     result = method(self, *args, **kwargs)
  145.   File "C:\Program Files\Python37\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
  146.     raise e.with_traceback(filtered_tb) from None
  147.   File "C:\Program Files\Python37\lib\site-packages\keras\engine\input_spec.py", line 228, in assert_input_compatibility
  148.     raise ValueError(f'Input {input_index} of layer "{layer_name}" '
  149. ValueError: Input 0 of layer "conv2d" is incompatible with the layer: expected min_ndim=4, found ndim=3. Full shape received: (None, 20, 647)
Add Comment
Please, Sign In to add comment