Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def model1(vocab_len, buff_len=None):
    """Build a character-level next-character LSTM classifier.

    Args:
        vocab_len: vocabulary size — both the per-timestep input feature
            dimension and the number of softmax output units.
        buff_len: input window length (number of timesteps). Defaults to
            the module-level ``buff_length`` for backward compatibility.

    Returns:
        An uncompiled Keras ``Sequential`` model (also prints its summary).
    """
    if buff_len is None:
        # NOTE(review): ``buff_length`` is a module-level constant defined
        # elsewhere in this file — confirm it is set before calling.
        buff_len = buff_length
    model = Sequential()
    model.add(LSTM(128, input_shape=(buff_len, vocab_len)))
    model.add(Dense(units=60, activation='relu'))
    model.add(Dense(units=vocab_len, activation='softmax'))
    model.summary()
    return model
def one_hot(Y, char2idx, vocablen):
    """One-hot encode a sequence of characters.

    Args:
        Y: sequence of characters; every element must be a key of
            ``char2idx``.
        char2idx: mapping from character to column index.
        vocablen: vocabulary size (number of columns in the result).

    Returns:
        ``np.ndarray`` of shape ``(len(Y), vocablen)`` with exactly one
        1.0 per row (all-zeros array of shape ``(0, vocablen)`` when ``Y``
        is empty).

    Raises:
        KeyError: if a character of ``Y`` is missing from ``char2idx``.
    """
    Ty = len(Y)
    Yoh = np.zeros((Ty, vocablen))
    if Ty:
        # Vectorized scatter: one fancy-indexed write instead of a
        # Python-level loop over rows.
        cols = [char2idx[ch] for ch in Y]
        Yoh[np.arange(Ty), cols] = 1
    return Yoh
def trainer(X, vocab, char2idx, no_epochs=1, batch_size=10):
    """Train a next-character LSTM on a corpus of names.

    Args:
        X: indexable corpus of names (each entry is ``str()``-converted).
        vocab: the character vocabulary; only ``len(vocab)`` is used.
        char2idx: mapping from character to index for one-hot encoding.
        no_epochs: number of outer sample-and-fit rounds.
        batch_size: number of names sampled (with replacement) per round.
            NOTE(review): this is a sample count, not the Keras fit batch
            size — ``model.fit`` runs with its default batch size.

    Side effects:
        Prints each (window, target) training pair and the encoded array
        shapes; saves the trained model to ``name_model.h5``.
    """
    model = model1(len(vocab))
    # BUG FIX: the labels below are one-hot encoded (via ``one_hot``), so
    # the loss must be 'categorical_crossentropy';
    # 'sparse_categorical_crossentropy' expects integer class indices and
    # fails on one-hot targets.
    model.compile(optimizer='Adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    for epn in range(no_epochs):
        np.random.seed(1 + epn)  # reproducible sampling per round
        Tx = len(X)
        indices = np.random.randint(0, Tx, batch_size)
        X_train = []
        Y_train = []
        for index in indices:
            name = str(X[index])
            # Slide a fixed-length window of ``buff_length`` characters
            # over the name; the character right after the window is the
            # prediction target.
            for chIndex in range(len(name) - 1):
                if chIndex >= buff_length - 1:
                    X_train.append(name[chIndex - buff_length + 1: chIndex + 1])
                    Y_train.append(name[chIndex + 1])
        # Debug trace of every training pair (kept from the original).
        for i in range(len(X_train)):
            print((X_train[i] + ' : ' + Y_train[i]))
        X_train_oh = np.copy(one_hot_buffer(X_train, char2idx, len(vocab)))
        Y_train_oh = np.copy(one_hot(Y_train, char2idx, len(vocab)))
        print(X_train_oh.shape, ':', Y_train_oh.shape)
        model.fit(x=X_train_oh, y=Y_train_oh)
    # Save once, after all training rounds are complete.
    model.save('name_model.h5')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement