from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# Claim branch: embed the padded claim token ids, then encode them with an LSTM.
claim_input = Input(shape=(train_max_length,), name='claim_input')
claim_emb = Embedding(output_dim=50, input_dim=num_words,
                      input_length=train_max_length)(claim_input)
claim_out = LSTM(128, dropout=0.3, recurrent_dropout=0.3,
                 kernel_regularizer=regularizers.l2(0.01))(claim_emb)

# Evidence branch: same architecture over the padded evidence token ids.
evidence_input = Input(shape=(evid_max_length,), name='evidence_input')
evidence_emb = Embedding(output_dim=50, input_dim=num_words,
                         input_length=evid_max_length)(evidence_input)
evidence_out = LSTM(128, dropout=0.3, recurrent_dropout=0.3,
                    kernel_regularizer=regularizers.l2(0.01))(evidence_emb)

# Merge both encodings and predict one binary probability for the pair.
x = concatenate([claim_out, evidence_out])
x = Dense(1, activation='sigmoid')(x)
sum_model = Model(inputs=[claim_input, evidence_input], outputs=[x])

# summarize layers (summary() prints itself and returns None, so no print() wrapper)
sum_model.summary()

optimizer = Adam(learning_rate=1e-3)
sum_model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

earlystopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1)
# 'checkpoint' was used but never defined in the original paste; the filepath here is an assumption.
checkpoint = ModelCheckpoint('sum_model.h5', monitor='val_loss',
                             save_best_only=True, verbose=1)
lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1, min_lr=1e-5, patience=0,
                                 verbose=1)
callbacks = [
    earlystopping,
    checkpoint,
    lr_reduction
]

# Fit on the padded id arrays, not on the Input tensors; claim_data and
# evidence_data are assumed to be the padded training sequences. A validation
# split is needed so the callbacks have a val_loss to monitor, and
# steps_per_epoch is dropped because it conflicts with batch_size on array input.
sum_model.fit([claim_data, evidence_data],
              new_label,
              validation_split=0.1,
              epochs=5,
              batch_size=128,
              callbacks=callbacks)