Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Odessa Version
# Modified version of HTW approach
def _steering_onpu(self):
    """Build and compile the steering-angle CNN (Odessa variant of the HTW approach).

    Architecture: input batch-normalization over 96x128x3 frames, three
    ELU convolution stages (16/24/32 filters) with batch norm, then three
    fully connected stages (300/100/1 units) ending in a tanh output layer
    named 'prediction'.

    Returns:
        The compiled Keras Sequential model (Nadam optimizer, MSE loss
        and MSE metric). Also prints the layer summary as a side effect.
    """
    # NOTE(review): every layer sets bias_initializer='he_normal';
    # he_normal is normally used as a *kernel* initializer — confirm
    # this was intentional and not a kernel_initializer typo.
    feature_layers = [
        BatchNormalization(input_shape=(96,128,3)),
        # Convolution no.1
        Conv2D(filters=16, kernel_size=(5, 5), strides=(2, 2),
               bias_initializer='he_normal', padding="valid"),
        ELU(),
        MaxPooling2D(pool_size=(2, 2)),
        BatchNormalization(),
        # Convolution no.2
        Conv2D(filters=24, kernel_size=(5, 5), strides=(2, 2),
               bias_initializer='he_normal', padding="valid"),
        ELU(),
        BatchNormalization(),
        # Convolution no.3
        Conv2D(filters=32, kernel_size=(3, 3),
               bias_initializer='he_normal', padding="valid"),
        ELU(),
        BatchNormalization(),
        Flatten(),
    ]
    head_layers = [
        # Fully connected no.1
        Dense(units=300, bias_initializer='he_normal'),
        ELU(),
        BatchNormalization(),
        Dropout(0.25),
        # Fully connected no.2
        Dense(units=100, bias_initializer='he_normal'),
        ELU(),
        BatchNormalization(),
        Dropout(0.25),
        # Fully connected no.3 — tanh keeps the steering output in [-1, 1]
        Dense(units=1, bias_initializer='he_normal',
              activation='tanh', name='prediction'),
    ]

    model = Sequential()
    for layer in feature_layers + head_layers:
        model.add(layer)

    model.compile(loss='mse', optimizer=optimizers.Nadam(), metrics=['mse'])
    model.summary()
    return model
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement