Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def main():
    """Train a linear regressor on the Boston Housing dataset.

    Loads the data, casts features to float32, batches into tf.data
    pipelines, then runs 100 epochs of MSE-loss training with Adam.
    Prints the train loss every epoch and the validation loss every
    10 epochs.
    """
    tf.random.set_seed(1234)  # reproducible initialization/shuffling
    # Bug fix: the dunder is tf.__version__ (lowercase), not tf.__Version__,
    # which raised AttributeError before the script ever trained.
    assert tf.__version__.startswith('2.')

    (x_train, y_train), (x_val, y_val) = keras.datasets.boston_housing.load_data()
    # Features load as float64; cast to float32 to match Keras' default dtype.
    x_train, x_val = x_train.astype(np.float32), x_val.astype(np.float32)
    print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)

    db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(64)
    # Batch size 102 == validation-set size, so db_val yields a single batch.
    db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(102)

    # Linear_regressor is assumed to be defined elsewhere in this file/project.
    model = Linear_regressor()
    criterion = keras.losses.MeanSquaredError()
    # Bug fix: the module is keras.optimizers (plural), not keras.optimizer.
    optimizer = keras.optimizers.Adam(learning_rate=1e-2)

    for epoch in range(100):
        for step, (x, y) in enumerate(db_train):
            with tf.GradientTape() as tape:
                logits = model(x)
                # Model emits shape (batch, 1); squeeze to (batch,) so the
                # MSE loss compares against the 1-D targets without
                # accidental broadcasting.
                logits = tf.squeeze(logits, axis=1)
                loss = criterion(y, logits)
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # Reports the loss of the last train batch of this epoch.
        print('[INFO] Epoch: {}, Train loss: {}'.format(epoch, loss.numpy()))

        if epoch % 10 == 0:
            # Evaluation only (no gradient tape); db_val is one full batch.
            for x, y in db_val:
                logits = model(x)
                logits = tf.squeeze(logits, axis=1)
                loss = criterion(y, logits)
                print('[INFO] Epoch: {}, Test loss: {}'.format(epoch, loss.numpy()))
# Script entry point: train only when executed directly, not on import.
if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement