Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import json
- import torch
- from sklearn import preprocessing
- from sklearn.model_selection import train_test_split
- from tensorflow import keras
- from tensorflow.keras.callbacks import EarlyStopping
- from tensorflow.keras.callbacks import ModelCheckpoint
- from tensorflow.keras.optimizers import Adam
- # from keras import losses
- from tensorflow.keras.models import Sequential
- from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
- import numpy as np
- import os
- os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
- os.environ["CUDA_VISIBLE_DEVICES"] = "0"
- JSON_SET_PATH = "json_set/data.json"
- GENRE_LIST = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
- def load_data(json_set_path):
- # X, y = [], []
- with open(json_set_path, "r", encoding="utf-8") as json_data:
- data = json.loads(json_data.read())
- Label = preprocessing.LabelEncoder()
- Label.fit(GENRE_LIST)
- y = np.array(Label.transform(data["label"]), dtype=float)
- X = data["mfcc"]
- # X = np.array(data["mfcc"], dtype=float)
- # print(numpy.array(X).shape)
- # print(len(X[]), len(X[0]))
- for i in data["mfcc"]:
- for j in i:
- if len(j) != 647:
- if len(j) > 647:
- del j[647:]
- else:
- for num in range(0, 647-len(j)):
- j.append(0)
- # print("index:", data["mfcc"].index(i), "after append:", len(j))
- else:
- pass
- # print("X.shape", np.array(X).shape, "y.shape", np.array(y).shape)
- # X.shape (1000, 20, 647) y.shape (1000,)
- return np.array(X), np.array(y)
- def define_model(input_shape=(20, 647)):
- model = Sequential()
- model.add(Conv2D(input_shape=input_shape, filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
- model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
- model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=256, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
- model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
- model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(Conv2D(filters=512, kernel_size=(3, 3), padding="same", activation="relu"))
- model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
- model.add(Flatten())
- model.add(Dense(units=4096, activation="relu"))
- model.add(Dense(units=4096, activation="relu"))
- model.add(Dense(units=2, activation="softmax"))
- # show the model information
- # model.summary()
- return model
- def main():
- # load the data
- X, y = load_data(json_set_path=JSON_SET_PATH)
- # ndarray 转换为tensor张量
- X_tensor = torch.from_numpy(X)
- y_tensor = torch.from_numpy(y)
- # split the data into the train adn test
- X_train, X_test, y_train, y_test = train_test_split(X_tensor, y_tensor, test_size=0.25)
- # X_train.shape torch.Size([750, 20, 647]) y_train_shape torch.Size([750])
- # X_train type: <class 'torch.Tensor'> y_train type: <class 'torch.Tensor'>
- # load the model
- model = define_model()
- opt = Adam(learning_rate=0.001)
- model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
- model.fit(x=X_train, y=y_train, batch_size=None, epochs=20, verbose=1, validation_data=(X_test, y_test))
- checkpoint = ModelCheckpoint("vgg16_1.h5",
- monitor='val_acc',
- verbose=1,
- save_best_only=True,
- save_weights_only=False,
- mode='auto', period=1)
- early = EarlyStopping(monitor='val_acc',
- min_delta=0,
- patience=20,
- verbose=1,
- mode='auto')
- hist = model.fit_generator(steps_per_epoch=100,
- generator=X_train,
- validation_data=X_test,
- validation_steps=10,
- epochs=100,
- callbacks=[checkpoint, early])
- if __name__ == "__main__":
- main()
- ERROR_INFORMATION:
- 2022-05-13 19:56:53.435198: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
- To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
- Traceback (most recent call last):
- File "C:/Users/xxxx/Pycharm Projects/Graduation_Project/VGG 16.py", line 132, in <module>
- main()
- File "C:/Users/xxxx/Pycharm Projects/Graduation_Project/VGG 16.py", line 106, in main
- model = define_model()
- File "C:/Users/xxxx/Pycharm Projects/Graduation_Project/VGG 16.py", line 62, in define_model
- model.add(Conv2D(input_shape=input_shape, filters=64, kernel_size=(3, 3), padding="same", activation="relu"))
- File "C:\Users\xxxx\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\training\tracking\base.py", line 629, in _method_wrapper
- result = method(self, *args, **kwargs)
- File "C:\Program Files\Python37\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
- raise e.with_traceback(filtered_tb) from None
- File "C:\Program Files\Python37\lib\site-packages\keras\engine\input_spec.py", line 228, in assert_input_compatibility
- raise ValueError(f'Input {input_index} of layer "{layer_name}" '
- ValueError: Input 0 of layer "conv2d" is incompatible with the layer: expected min_ndim=4, found ndim=3. Full shape received: (None, 20, 647)
Add Comment
Please, Sign In to add comment