Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
def content_loss(base, combination):
    """Return the content loss between two feature tensors.

    Defined as the sum of squared differences (squared L2 distance)
    between the combination-image features and the base-image features.
    `K` is the Keras backend, imported elsewhere in this file.
    """
    difference = combination - base
    return K.sum(K.square(difference))
# Map each layer name to its symbolic output tensor so features can be
# looked up by name below. (Dict comprehension replaces dict([...]).)
outputs_dict = {layer.name: layer.output for layer in model.layers}

# VGG-style layer names: one deep layer for content, several shallow-to-deep
# layers for style.
content_layer = 'block5_conv2'
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']

# Relative weights of the three loss components.
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.025

# K here refers to the keras backend
loss = K.variable(0.)

# Content term: compare features of the target image (batch index 0) with
# those of the combination image (batch index 2).
# NOTE(review): assumes the input batch is stacked as
# [target, style-reference, combination] — confirm against the caller.
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(target_image_features,
                                      combination_features)
# NOTE(review): removed a stray module-level `return` statement that was a
# pasted duplicate of content_loss's body — `return` outside a function is
# a SyntaxError.
Add Comment
Please sign in to add a comment.