Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- #!/usr/bin/env python
- # coding=utf-8
- from __future__ import absolute_import
- import grammar
- from keras.models import Sequential
- from keras.layers.core import TimeDistributedDense, Masking
- from keras.layers.recurrent import LSTM
- import numpy as np
- import pad
- '''
- Train a LSTM on UCF11 dataset (preprocessed wirh pretrained CNN)
- '''
- np.random.seed(1337)
- print 'Loading data...'
- nb_sample = 2048
- (X, y) = grammar.gen_ERber(nb_sample)
- split = int(len(X)*0.05)
- X_train, y_train = X[split:], y[split:]
- X_test, y_test = X[:split], y[:split]
- batch_size = 64
- maxlen = max(len(x) for x in X) # padding the sequence to limited length
- print 'padding sequence (samples x times x inputdim)'
- X_train = pad.pad_sequences(X_train, maxlen=maxlen)
- X_test = pad.pad_sequences(X_test, maxlen=maxlen)
- y_train = pad.pad_sequences(y_train, maxlen=maxlen)
- y_test = pad.pad_sequences(y_test, maxlen=maxlen)
- print 'X_train shape:', X_train.shape
- print 'X_test shape:', X_test.shape
- print 'Build model...'
- model = Sequential()
- model.add(Masking())
- model.add(LSTM(7, 6, return_sequences=True))
- model.add(TimeDistributedDense(6, 7, activation='sigmoid'))
- # complie model...
- print 'compling model...'
- model.compile(loss='mse', optimizer='adam')
- #model.load_weights('ereber-lstm-weights')
- print X_train.shape,y_train.shape
- # fit model...
- print 'Training...'
- model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=100, validation_data=(X_test, y_test), show_accuracy=True)
- score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
- print 'test\t score,\t accuracy:', (score, acc)
- print 'saved model as ereber-lstm-weights'
- model.save_weights('ereber-lstm-weights',overwrite=True)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement