Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
from keras.utils import multi_gpu_model
from keras.models import Sequential
from keras.layers import Dense, InputLayer
import numpy as np

# Number of GPUs to replicate the model across; also reused as the batch
# size so each GPU processes exactly one sample per training step.
split = 4

# Synthetic data: 1200 samples x 1200 features, 10 target classes.
data_size = 1200
classes = 10

X = np.random.rand(data_size, data_size)
# One-hot targets: a softmax output trained with categorical_crossentropy
# expects each row of Y to be a probability distribution (exactly one 1).
# The original np.random.randint(2, ...) produced multi-hot rows, which is
# inconsistent with that loss/activation pairing.
Y = np.eye(classes)[np.random.randint(classes, size=data_size)]

print(X.shape, Y.shape)  # (1200, 1200) (1200, 10)

# Build a deep fully-connected classifier.
# NOTE(review): 8292 looks like a typo for 8192 (2**13) — kept as-is to
# preserve the original architecture; confirm with the author.
model = Sequential()
model.add(InputLayer(input_shape=(data_size,)))
model.add(Dense(units=8292, activation='relu'))
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(units=2048, activation='relu'))
model.add(Dense(units=1024, activation='relu'))
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=classes, activation='softmax'))
print(model.summary())

# Replicate the model on `split` GPUs; each batch is split evenly across
# the replicas and gradients are combined on the CPU.
parallel_model = multi_gpu_model(model, gpus=split)
parallel_model.compile(loss='categorical_crossentropy',
                       optimizer='rmsprop',
                       metrics=['accuracy'])

# Train. batch_size == number of GPUs -> one sample per GPU per step,
# which maximizes update count but heavily underutilizes each GPU.
parallel_model.fit(X, Y, epochs=100, batch_size=split)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement