import tensorflow as tf
from keras.layers import Input, LSTM, Dense
from keras.models import Model
from keras.utils import multi_gpu_model

def create_model():
    # Fixed batch size of 1, one timestep, 900 features per step
    model_in = Input(batch_shape=(1, 1, 900), name='seq_model_in')
    lstm_layer = LSTM(1, stateful=stateful)(model_in)
    dense_final = Dense(900, activation='sigmoid')(lstm_layer)
    model = Model(inputs=model_in, outputs=dense_final)
    return model

model = create_model()
model.compile(loss='binary_crossentropy', optimizer='adam')

# Training loop
# num_epochs, num_train_examples, sequence_length, train, y_true,
# stateful, and num_gpus are defined elsewhere.
for epoch in range(num_epochs):
    for i in range(num_train_examples):
        for j in range(sequence_length):
            tr_loss = model.train_on_batch(train[i][j], y_true)
            # Accumulate the training loss, etc.

# This has to happen instead of the spot above where I call
# create_model() and model.compile().
if num_gpus <= 1:
    print("training with 1 GPU")
    model = create_model()
else:
    print("training with multiple GPUs")
    # Build the template model on the CPU, then replicate it across GPUs
    with tf.device("/cpu:0"):
        model = create_model()
    model = multi_gpu_model(model, gpus=num_gpus)
model.compile(loss='binary_crossentropy', optimizer='adam')
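One detail worth noting: because the LSTM is stateful, Keras carries the hidden state forward across successive train_on_batch calls. If each train[i] is an independent sequence (an assumption here, not stated in the snippet), the carried state should be cleared with Keras's model.reset_states() before moving to the next one, and each train[i][j] must have shape (1, 1, 900) to match the Input's batch_shape. A minimal sketch of the loop with that reset added:

for epoch in range(num_epochs):
    for i in range(num_train_examples):
        for j in range(sequence_length):
            tr_loss = model.train_on_batch(train[i][j], y_true)
        # Discard the LSTM's carried state before starting the next sequence
        model.reset_states()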