def create_model(optimizer="adam"):
    """Build and compile the small CNN regression model.

    Architecture: zero padding -> 2D convolution -> max pooling -> flatten
    -> two small dense layers -> a final linear unit, compiled with a
    mean-squared-error loss for regression.

    Parameters
    ----------
    optimizer : str or Keras optimizer, default "adam"
        Passed straight to ``model.compile``; exposed as a parameter so the
        optimizer can be grid-searched through ``KerasRegressor``.

    Returns
    -------
    A compiled Keras ``Sequential`` model.
    """
    model = Sequential()
    # Pad the tiny 6x3x3 input so the 8x8 convolution kernel fits.
    model.add(ZeroPadding2D((10, 13), input_shape=(6, 3, 3)))
    model.add(Convolution2D(115, 8, 8))
    model.add(MaxPooling2D((7, 7)))
    model.add(Flatten())
    # NOTE(review): input_dim=690 assumes the flattened conv/pool output is
    # exactly 690 wide -- confirm via model.summary() if the input shape or
    # any layer above changes.
    model.add(Dense(output_dim=32, input_dim=690, init="he_normal",
                    activation='relu'))
    model.add(Dense(output_dim=1, input_dim=32, init="he_normal",
                    activation='tanh'))
    model.add(Dense(output_dim=1, init="he_normal", activation='linear'))
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    return model

# TODO: make this work for other feature ranges.
# TODO: try Gabor kernels for the convolution, as suggested by the other paper.
# Split the normalized, interweaved data into train/test arrays.
# NOTE(review): ``model`` here is called as a data-splitting helper defined
# elsewhere in this file, and is then immediately shadowed below by the
# KerasRegressor -- the name is kept since later code may rely on it, but
# the shadowing is worth cleaning up.
input_train, input_test, output_train, output_test = model(
    0,
    train_input_data_interweawed_normalized,
    output_data_train,
    test_input_data_interweawed_normalized,
    output_data_test)

seed = 7
np.random.seed(seed)  # reproducible weight init / CV splits

print("Regressor")
model = KerasRegressor(build_fn=create_model, nb_epoch=100)

# Grid-search only the optimizer choice.
optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
param_grid = dict(optimizer=optimizer)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=10)

print("Grid fit")
# Hold back the last ~0.1% of samples. Use // so the slice index is an int
# (plain / yields a float on Python 3 and breaks slicing), and guard the
# cut==0 case: [:-0] would otherwise produce an EMPTY slice.
cut_in = len(input_train) // 1000
cut_out = len(output_train) // 1000
grid_result = grid.fit(
    np.asarray(input_train[:-cut_in] if cut_in else input_train),
    np.array(output_train[:-cut_out] if cut_out else output_train))

# Summarize results: best configuration, then every tried combination.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))