import tensorflow as tf
from keras.layers import Input, LSTM, Dense
from keras.models import Model
from keras.utils import multi_gpu_model

def create_model(stateful=True):
    # batch_shape pins the batch size to 1, which a stateful LSTM requires;
    # the redundant shape=(1,) argument conflicted with it and is dropped.
    model_in = Input(batch_shape=(1, 1, 900), name='seq_model_in')
    lstm_layer = LSTM(1, stateful=stateful)(model_in)
    dense_final = Dense(900, activation='sigmoid')(lstm_layer)
    model = Model(inputs=model_in, outputs=dense_final)
    return model

model = create_model()
model.compile(loss='binary_crossentropy', optimizer='adam')
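# Sketch (assumption: none of these arrays or counts appear in the original
# paste; the values below are hypothetical placeholders). Each train[i][j]
# fed to train_on_batch must match the fixed batch shape (1, 1, 900), and
# y_true must match the Dense output shape (1, 900).
import numpy as np
num_epochs = 10
num_train_examples = 100
sequence_length = 20
train = np.random.rand(num_train_examples, sequence_length, 1, 1, 900)
y_true = np.random.rand(1, 900)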
# Training loop
for epoch in range(num_epochs):
    for i in range(num_train_examples):
        for j in range(sequence_length):
            tr_loss = model.train_on_batch(train[i][j], y_true)
            # Accumulate the training loss, etc.
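        # Sketch (assumption, not in the original paste): with stateful=True
        # the LSTM carries hidden state across train_on_batch calls, so the
        # state should be cleared between independent training examples.
        model.reset_states()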
# The following has to run in place of the earlier create_model() /
# model.compile() calls.
if num_gpus <= 1:  # num_gpus is assumed to be defined elsewhere
    print("training with 1 GPU")
    model = create_model()
else:
    print("training with multiple GPUs")
    # Build the template model on the CPU so its weights live in host
    # memory, then replicate it across the available GPUs.
    with tf.device("/cpu:0"):
        model = create_model()
    model = multi_gpu_model(model, gpus=num_gpus)
model.compile(loss='binary_crossentropy', optimizer='adam')
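# Sketch (assumption, not part of the original paste): when wrapping with
# multi_gpu_model it helps to keep a separate reference to the template
# model and save its weights, since checkpoints taken from the wrapper are
# awkward to reload on a single device. Names and filename are hypothetical.
with tf.device("/cpu:0"):
    template_model = create_model()
parallel_model = multi_gpu_model(template_model, gpus=num_gpus)  # requires num_gpus >= 2
parallel_model.compile(loss='binary_crossentropy', optimizer='adam')
# ... train parallel_model as in the loop above ...
template_model.save_weights('lstm_weights.h5')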