Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- ### ATTEMPT #1:
def pi_ini(shape):
    """Draw initial inclusion probabilities uniformly from [0, 1)."""
    return np.random.uniform(low=0.0, high=1.0, size=shape)
def spike_ini(shape, pi, dtype=np.float32):
    """Sample a binary spike mask: each entry is 1 with probability ``pi``."""
    draws = np.random.binomial(n=1, p=pi, size=shape)
    return draws.astype(dtype=dtype)
class PenultimateLayer(Layer):
    """Dense-like Keras layer whose kernel is element-wise masked by a
    binary "spike" weight created once at build time.

    The forward pass computes ``activation(dot(inputs, spike * kernel) + bias)``.
    ``spike`` and ``pi`` are created via the user-supplied initializers
    (see ``spike_ini`` / ``pi_ini``); NOTE(review): ``pi`` is created in
    ``build`` but never read in ``call`` in this version — presumably a
    leftover from a dropout-probability scheme; confirm before relying on it.
    NOTE(review): ``add_weight`` defaults to ``trainable=True``, so the
    optimizer will update ``spike`` and ``pi`` too — confirm that is intended.
    """
    def __init__(self, units, pi_initializer, spike_initializer,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='he_normal',
                 bias_initializer='he_normal',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        # Keras convention: accept `input_dim=` as shorthand for `input_shape=`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(PenultimateLayer, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        # Raw callables for the pi/spike weights (not routed through
        # `initializers.get`, unlike kernel/bias below).
        self.pi_initializer=pi_initializer
        self.spike_initializer=spike_initializer
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True

    def build(self, input_shape):
        """Create kernel, pi, spike, and (optionally) bias weights."""
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        # Per-connection inclusion probabilities (unused in `call` here).
        self.pi=self.add_weight(shape=(input_dim, self.units),
                                initializer=self.pi_initializer,
                                name='pi',
                                regularizer=None,
                                constraint=None
                                )
        # Binary mask applied multiplicatively to the kernel in `call`.
        self.spike=self.add_weight(shape=(input_dim, self.units),
                                   initializer=self.spike_initializer,
                                   name='spike',
                                   regularizer=None,
                                   constraint=None)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Pin the last axis so Keras rejects inputs of the wrong width.
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs):
        """Masked dense transform: dot(inputs, spike * kernel) (+ bias, activation)."""
        output = K.dot(inputs, (self.spike*self.kernel))
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')
        if self.activation is not None:
            output = self.activation(output)
        return output

    def compute_output_shape(self, input_shape):
        """Same as Dense: replace the last axis with `units`."""
        assert input_shape and len(input_shape) >= 2
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = self.units
        return tuple(output_shape)
- ### ATTEMPT #2:
class PenultimateLayer(Layer):
    """Dense-like Keras layer whose kernel is masked by a fresh Bernoulli
    "spike" sample on every forward pass.

    ``pi`` is an ``(input_dim, units)`` weight of inclusion probabilities;
    each call samples ``spike ~ Bernoulli(pi)`` and computes
    ``activation(dot(inputs, spike * kernel) + bias)``.

    Args:
        units: Dimensionality of the output space.
        pi_initializer: Initializer (callable) for the ``pi`` probability
            weight, e.g. ``pi_ini``.
        activation / use_bias / *_initializer / *_regularizer / *_constraint:
            As in ``keras.layers.Dense``.

    NOTE(review): a plain Bernoulli ``.sample()`` is not reparameterized, so
    gradients do not flow into ``pi`` through the mask even though ``pi`` is
    created as a trainable weight — confirm that is intended.
    """
    def __init__(self, units, pi_initializer,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='he_normal',
                 bias_initializer='he_normal',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        # Keras convention: accept `input_dim=` as shorthand for `input_shape=`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(PenultimateLayer, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        # BUG FIX: the original assigned the undefined name `spike_initializer`
        # (a leftover from attempt #1's signature), which raised NameError as
        # soon as the layer was constructed. The parameter here is
        # `pi_initializer`.
        self.pi_initializer = pi_initializer
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True

    def build(self, input_shape):
        """Create kernel, pi, and (optionally) bias weights."""
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        # Per-connection inclusion probabilities used to sample the mask in
        # `call`.
        self.pi = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.pi_initializer,
                                  name='pi',
                                  regularizer=None,
                                  constraint=None)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Pin the last axis so Keras rejects inputs of the wrong width.
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs):
        """Sample a fresh binary mask and apply the masked dense transform."""
        # Resampled on every call, so the effective kernel is stochastic.
        spike = tfp.distributions.Bernoulli(logits=None, probs=self.pi,
                                            dtype=tf.dtypes.float32).sample()
        output = K.dot(inputs, (spike * self.kernel))
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')
        if self.activation is not None:
            output = self.activation(output)
        return output

    def compute_output_shape(self, input_shape):
        """Same as Dense: replace the last axis with `units`."""
        assert input_shape and len(input_shape) >= 2
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = self.units
        return tuple(output_shape)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement