Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
import keras
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.models import Model
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
- X = np.load('x_gtzan_npy.npy')
- y = np.load('y_grzan_npy.npy')
- y = to_categorical(y)
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify = y)
- X_val = X_train[:3800]
- y_val = y_train[:3800]
- X_train = X_train[3800:]
- y_train = y_train[3800:]
- # Model Definition
- input_shape = X_train[0].shape
- print (input_shape)
- model = Sequential()
- # Conv Block 1
- model.add(Conv2D(8, kernel_size=(3, 3), strides=(1, 1),
- activation='relu', input_shape=input_shape))
- model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
- model.add(Dropout(0.25))
- # Conv Block 2
- model.add(Conv2D(16, (3, 3), strides=(1, 1), activation='relu'))
- model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_last"))
- model.add(Dropout(0.25))
- # Conv Block 3
- model.add(Conv2D(32, (3, 3), strides=(1, 1), activation='relu'))
- model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_last"))
- model.add(Dropout(0.25))
- # Conv Block 4
- model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu'))
- model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format="channels_last"))
- model.add(Dropout(0.25))
- # Conv Block 5
- model.add(Conv2D(32, (3, 3), strides=(1, 1), activation='relu'))
- model.add(MaxPooling2D(pool_size=(1, 1), strides=(1, 1), data_format="channels_last"))
- model.add(Dropout(0.25))
- # MLP
- model.add(Flatten())
- model.add(Dense(10, activation='softmax'))
- model.summary()
- model.compile(loss=keras.losses.categorical_crossentropy,
- optimizer=keras.optimizers.Adam(),
- metrics=['accuracy'])
- hist = model.fit(X_train, y_train,
- batch_size=32,
- epochs=50,
- verbose=1,
- validation_data=(X_val, y_val))
- score = model.evaluate(X_test, y_test, verbose=0)
- print("val_loss = {:.3f} and val_acc = {:.3f}".format(score[0], score[1]))
- input_shape = X_train[0].shape
- def cnn_vgg16(input_shape, freezed_layers):
- input_tensor = Input(shape=input_shape)
- vgg16 = VGG16(include_top=False, weights='imagenet',
- input_tensor=input_tensor)
- top = Sequential()
- top.add(Flatten(input_shape=vgg16.output_shape[1:]))
- top.add(Dense(128, activation='relu'))
- top.add(Dropout(0.5))
- top.add(Dense(10, activation='softmax'))
- model = Model(inputs=vgg16.input, outputs=top(vgg16.output))
- for layer in model.layers[:freezed_layers]:
- layer.trainable = False
- return model
- model = cnn_vgg16(input_shape, 5)
- model.summary()
- model.compile(loss=keras.losses.categorical_crossentropy,
- optimizer=keras.optimizers.Adam(),
- metrics=['accuracy'])
- hist = model.fit(X_train, y_train,
- batch_size=128,
- epochs=20,
- verbose=1,
- validation_data=(X_val, y_val))
- score = model.evaluate(X_test, y_test, verbose=0)
- print("val_loss = {:.3f} and val_acc = {:.3f}".format(score[0], score[1]))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement