Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- import keras
- from keras import backend as K
- from keras.models import Sequential
- from keras.layers import Activation
- from keras.layers import LSTM
- from keras.layers.core import Dense
- from keras.optimizers import Adam
- from keras.metrics import categorical_crossentropy
- import numpy as np
- from keras.models import Sequential
- from sklearn.preprocessing import MinMaxScaler
# Reinforcement-learning style dimensions. NOTE(review): ACTIONS_DIM,
# OBSERVATIONS_DIM and MAX_ITERATIONS are never read in the visible code —
# possibly leftovers from a CartPole/gym example; confirm before removing.
ACTIONS_DIM = 2
OBSERVATIONS_DIM = 4
MAX_ITERATIONS = 200
# Adam learning rate used by get_model().
LEARNING_RATE = 0.001
def data1():
    """Load comma-separated market data from "2.txt".

    Returns the parsed contents as a 2-D NumPy float array (one row per
    record); unparseable fields become NaN, per numpy.genfromtxt defaults.
    The array is echoed to stdout for inspection.
    """
    data = np.genfromtxt("2.txt", delimiter=",")
    # Python-2 `print data` fixed to the function form, valid in 2 and 3.
    print(data)
    return data
def samples1(i):
    """Scan the first five entries of a module-level ``data`` for a comma.

    NOTE(review): this looks like dead debug code. ``data`` is never
    assigned at module scope (``main`` keeps its own local), so calling
    this raises NameError; the parameter ``i`` is unused; and every path
    returns 0. Kept with Python-2 prints fixed only — confirm intent
    before relying on it.
    """
    for b in range(5):
        print("A")
        if data[b] == ",":
            print("A")
            return 0
    print("B")
    return 0
def get_model():
    """Build and compile the dense classifier.

    One scalar input feature -> Dense(16, relu) -> Dense(32, relu) ->
    softmax over 2 classes. Compiled with Adam at LEARNING_RATE and
    sparse categorical cross-entropy; prints a layer summary and returns
    the compiled model.
    """
    layers = [
        Dense(16, input_shape=(1,), activation="relu"),
        Dense(32, activation="relu"),
        Dense(2, activation="softmax"),
    ]
    net = Sequential(layers)
    net.compile(
        optimizer=Adam(lr=LEARNING_RATE),
        loss='sparse_categorical_crossentropy',
        metrics=["accuracy"],
    )
    net.summary()
    return net
def get_model2():
    """Build and compile the small LSTM regressor.

    Input shape (1, 64) -> LSTM(64, return_sequences=True) -> Dense(64).
    Compiled with mean-absolute-error loss and the 'adam' optimizer;
    prints a layer summary and returns the compiled model.
    """
    net = Sequential()
    net.add(LSTM(64, input_shape=(1, 64), return_sequences=True))
    net.add(Dense(64))
    net.compile(
        loss='mean_absolute_error',
        optimizer='adam',
        metrics=['accuracy'],
    )
    net.summary()
    return net
def train2(model, scaled_train_samples2, train_labels):
    """Fit the LSTM model for 100 epochs, one sample per batch.

    BUG FIX: ``nb_epoch`` is the pre-Keras-2 spelling and was removed in
    Keras 2 — renamed to ``epochs``. Behavior otherwise unchanged
    (verbose=2: one log line per epoch).
    """
    model.fit(scaled_train_samples2, train_labels, epochs=100, batch_size=1, verbose=2)
def train(model, scaled_train_samples, train_labels):
    """Fit the dense classifier: 10 epochs, batches of 100, verbose=2."""
    model.fit(
        scaled_train_samples,
        train_labels,
        batch_size=100,
        epochs=10,
        verbose=2,
    )
def predict(model, train_samples):
    """Return the model's predictions for the given samples."""
    predictions = model.predict(train_samples)
    return predictions
def main():
    """Load OHLCV data from "2.txt", train two toy models, print predictions.

    Pipeline: read the CSV, interleave the high/low of the first SAMPLES
    rows, label each sample 1 when the next one is >= it (else 0), train
    a dense classifier on the min-max-scaled samples and an LSTM on the
    raw ones, then predict on a small held-out slice.

    Fixes vs. the original: Python-2 print statements converted to
    print(); the prediction loop no longer indexes past the end of
    ``samples`` (the original `range(100)` raised IndexError at i=64).
    Returns 0 on completion.
    """
    SAMPLES = 65

    data = data1()

    # Column layout assumed: date, open, high, low, close, volume,
    # open interest — TODO confirm against the producer of "2.txt".
    # (The original sliced with data[:,][:,k]; data[:, k] is equivalent.)
    Date = data[:, 0]
    Open = data[:, 1]
    High = data[:, 2]
    Low = data[:, 3]
    Close = data[:, 4]
    Volume = data[:, 5]
    OpenInt = data[:, 6]
    print(Date)
    print(Open)
    print(High)
    print(Low)
    print(Close)
    print(Volume)
    print(OpenInt)

    # Interleave high and low of each of the first SAMPLES rows
    # (2*SAMPLES values total).
    samples = []
    for i in range(SAMPLES):
        samples.append(High[i])
        samples.append(Low[i])
    print(samples)

    # Label sample i with 1 when the following sample is >= it, else 0.
    train_samples = []
    train_labels = []
    for i in range(SAMPLES - 1):
        a = 1 if samples[i + 1] >= samples[i] else 0
        train_samples.append(samples[i])
        train_labels.append(a)
    train_samples = np.array(train_samples)
    train_labels = np.array(train_labels)
    # 3-D view of the labels for the LSTM's (batch, time, features) input.
    train_labels2 = train_labels.reshape((1, 1, SAMPLES - 1))

    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_train_samples = scaler.fit_transform(train_samples.reshape(-1, 1))
    scaled_train_samples2 = train_samples.reshape((1, 1, SAMPLES - 1))
    print(scaled_train_samples)
    print(train_labels)

    model = get_model()
    model2 = get_model2()
    train(model, scaled_train_samples, train_labels)
    print(scaled_train_samples2)
    train2(model2, scaled_train_samples2, train_labels2)

    # Held-out slice past the training window.
    # BUG FIX: the original iterated range(100), but samples has only
    # 2*SAMPLES entries, so samples[i + SAMPLES + 1] went out of bounds
    # at i = 64; clamp the loop to the valid index range.
    predict_samples = []
    predict_labels = []
    for i in range(min(100, len(samples) - SAMPLES - 1)):
        a = 1 if samples[i + SAMPLES + 1] >= samples[i + SAMPLES] else 0
        # NOTE(review): pairs samples[i] with a label derived from
        # samples[i + SAMPLES] — kept as in the original, but this
        # misalignment looks unintentional; verify.
        predict_samples.append(samples[i])
        predict_labels.append(a)
    predict_samples = np.array(predict_samples)
    predict_labels = np.array(predict_labels)
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_predict_samples = scaler.fit_transform(predict_samples.reshape(-1, 1))
    print(predict(model, scaled_predict_samples))
    print("hallo")
    return 0
# Script entry point: run the full load/train/predict pipeline only when
# executed directly (not when imported as a module).
if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement