Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- import pandas as pd
- import tensorflow as tf
- #import numpy as np
- from tensorflow import keras
- from tensorflow.keras import layers
- from sklearn.model_selection import train_test_split
- from sklearn.preprocessing import StandardScaler
# Load daily OHLCV price data; column 0 (the date) becomes the index.
# NOTE(review): "Data" looks like a typo for "Date", but it is a runtime
# column label — kept as-is to avoid changing behavior for downstream code.
df = pd.read_csv(
    'aat.us.txt',
    names=["Data", "Open", "High", "Low", "Close", "Volume"],
    usecols=[0, 1, 2, 3, 4, 5],
    parse_dates=True,
    index_col=0,
)

# Split into train/validation features and targets (80/20).
# NOTE(review): the 'Close' target column is also present in the feature
# frame, which leaks the answer into the inputs — confirm intent.
t_train, val_train, t_target, val_target = train_test_split(
    df, df['Close'], test_size=0.2
)

# Normalization.
# Fit the scaler on TRAINING data only, then apply the same transform to
# the validation split — re-fitting on validation data (as the original
# code did) leaks validation statistics into preprocessing.
feature_scaler = StandardScaler()
t_train = feature_scaler.fit_transform(t_train)
val_train = feature_scaler.transform(val_train)

# Targets are 1-D Series; StandardScaler requires 2-D input, so reshape
# to a single column (the original calls raised ValueError here).
target_scaler = StandardScaler()
t_target = target_scaler.fit_transform(t_target.to_numpy().reshape(-1, 1))
val_target = target_scaler.transform(val_target.to_numpy().reshape(-1, 1))
- #train model
def build_model(input_dim=None):
    """Build and compile a small dense regression network.

    Args:
        input_dim: number of input features. When ``None`` (the default,
            preserving the original no-argument call), it is taken from
            the column count of the module-level ``t_train``.

    Returns:
        A compiled ``keras.Sequential`` model: two 64-unit ReLU hidden
        layers, one linear output, MSE loss, RMSprop optimizer,
        tracking MAE and MSE metrics.
    """
    if input_dim is None:
        # Bug fix: after StandardScaler, t_train is a numpy ndarray and
        # has no .keys() (the original len(t_train.keys()) crashed);
        # .shape[1] works for both DataFrame and ndarray.
        input_dim = t_train.shape[1]
    model = keras.Sequential([
        layers.Dense(64, activation='relu', input_shape=[input_dim]),
        layers.Dense(64, activation='relu'),
        layers.Dense(1),
    ])
    # Fixed low learning rate, passed positionally as in the original.
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae', 'mse'])
    return model
# Instantiate the regression network and print its layer-by-layer
# architecture summary (layer shapes and parameter counts) to stdout.
model = build_model()
model.summary()
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement