Weighted_Hausdorff_loss
vlpap | May 13th, 2020 | Python
import math

import numpy as np
import tensorflow as tf
from sklearn.utils.extmath import cartesian
from tensorflow.keras import backend as K

resized_height = 128
resized_width  = 128
max_dist = math.sqrt(resized_height**2 + resized_width**2)
n_pixels = resized_height * resized_width
# (row, col) coordinates of every pixel, shape (n_pixels, 2), e.g.
# cartesian([np.arange(2), np.arange(2)]) -> [[0 0], [0 1], [1 0], [1 1]]
all_img_locations = tf.convert_to_tensor(cartesian([np.arange(resized_height), np.arange(resized_width)]),
                                         tf.float32)


def tf_repeat(tensor, repeats):
    """
    Args:
        tensor: A Tensor. 1-D or higher.
        repeats: A list. Number of repeats for each dimension; its length must
            match the number of dimensions in `tensor`.
    Returns:
        A Tensor. Has the same type as `tensor` and shape `tensor.shape * repeats`.
    """
    with tf.compat.v1.variable_scope("repeat"):
        expanded_tensor = tf.expand_dims(tensor, -1)
        multiples = [1] + repeats
        tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
        repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
    return repeated_tensor
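
# Quick sanity check (not in the original paste): tf_repeat tiles each element
# along a new trailing axis and reshapes back. For example, a (2, 1) column
# repeated with repeats=[1, 3] becomes a (2, 3) matrix:
#   tf_repeat(tf.constant([[1.0], [2.0]]), [1, 3])
#   -> [[1. 1. 1.]
#       [2. 2. 2.]]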


def Weighted_Hausdorff_loss(y_true, y_pred):
    # Weighted Hausdorff Distance: https://arxiv.org/pdf/1806.07564.pdf
    # prob_map_b is the predicted probability map (y_pred);
    # gt_b is the binary ground-truth map (y_true).

    terms_1 = []
    terms_2 = []
    # Drop the trailing channel dimension: (batch, H, W, 1) -> (batch, H, W)
    y_true = tf.squeeze(y_true, axis=-1)
    y_pred = tf.squeeze(y_pred, axis=-1)
    # Alternative channel reduction:
    # y_true = tf.reduce_mean(y_true, axis=-1)
    # y_pred = tf.reduce_mean(y_pred, axis=-1)
    for b in range(1):  # note: assumes batch size 1
        gt_b = y_true[b]
        prob_map_b = y_pred[b]
        # Pairwise distances between all possible locations and the ground-truth locations
        n_gt_pts = tf.cast(tf.reduce_sum(gt_b), tf.int32)  # must be an int to use as a tile count
        gt_b = tf.where(tf.cast(gt_b, tf.bool))
        gt_b = tf.cast(gt_b, tf.float32)
        d_matrix = tf.sqrt(tf.maximum(
            tf.reshape(tf.reduce_sum(gt_b * gt_b, axis=1), (-1, 1))
            + tf.reduce_sum(all_img_locations * all_img_locations, axis=1)
            - 2 * tf.matmul(gt_b, tf.transpose(all_img_locations)), 0.0))
        d_matrix = tf.transpose(d_matrix)  # shape (n_pixels, n_gt_pts)
        # Reshape the probability map as a long column vector,
        # and prepare it for multiplication
        p = tf.reshape(prob_map_b, (n_pixels, -1))
        n_est_pts = tf.reduce_sum(p)
        p_replicated = tf_repeat(tf.reshape(p, (-1, 1)), [1, n_gt_pts])
        eps = 1e-6
        alpha = 4
        # Weighted Hausdorff Distance
        term_1 = (1 / (n_est_pts + eps)) * tf.reduce_sum(p * tf.reshape(tf.reduce_min(d_matrix, axis=1), (-1, 1)))

        d_div_p = tf.reduce_min((d_matrix + eps) / (p_replicated**alpha + eps / max_dist), axis=0)
        d_div_p = tf.clip_by_value(d_div_p, 0, max_dist)
        term_2 = tf.reduce_mean(d_div_p, axis=0)
        terms_1.append(term_1)
        terms_2.append(term_2)
    terms_1 = tf.stack(terms_1)
    terms_2 = tf.stack(terms_2)
    # terms_1 = tf.compat.v1.Print(tf.reduce_mean(terms_1), [tf.reduce_mean(terms_1)], "term 1")
    # terms_2 = tf.compat.v1.Print(tf.reduce_mean(terms_2), [tf.reduce_mean(terms_2)], "term 2")

    terms_1 = tf.identity(tf.reduce_mean(terms_1), name="term_1")
    terms_2 = tf.identity(tf.reduce_mean(terms_2), name="term_2")
    res = terms_1 + terms_2
    return res
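
# A minimal usage sketch (not part of the original paste): compiling a small
# Keras model with this loss. The architecture below is an assumption chosen
# only for illustration; inputs and targets must be (batch, 128, 128, 1) with
# batch size 1, matching the module constants and the `range(1)` loop above.
def build_example_model():
    inputs = tf.keras.Input(shape=(resized_height, resized_width, 1))
    x = tf.keras.layers.Conv2D(8, 3, padding="same", activation="relu")(inputs)
    outputs = tf.keras.layers.Conv2D(1, 1, activation="sigmoid")(x)
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer="adam", loss=Weighted_Hausdorff_loss)
    return model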


###############################################################################################

def cdist(A, B):
    # squared norms of each row in A and B
    na = tf.reduce_sum(tf.square(A), 1)
    nb = tf.reduce_sum(tf.square(B), 1)

    # na as a column vector and nb as a row vector
    na = tf.reshape(na, [-1, 1])
    nb = tf.reshape(nb, [1, -1])

    # return pairwise euclidean distance matrix
    D = tf.sqrt(tf.maximum(na - 2 * tf.matmul(A, B, False, True) + nb, 0.0))
    return D
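
# Example (not in the original paste): cdist gives all pairwise Euclidean
# distances between the rows of A and the rows of B.
#   cdist(tf.constant([[0., 0.], [3., 4.]]), tf.constant([[0., 0.]]))
#   -> [[0.]
#       [5.]]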

def weighted_hausdorff_distance(y_true, y_pred):

    W = 256
    H = 256
    alpha = 2
    max_dist = math.sqrt(W**2 + H**2)
    eps = 1e-6
    all_img_locations = tf.convert_to_tensor(cartesian([np.arange(W), np.arange(H)]), dtype=tf.float32)
    y_true = K.reshape(y_true, [W, H])
    gt_points = K.cast(tf.where(y_true > 0.5), dtype=tf.float32)
    num_gt_points = tf.shape(gt_points)[0]

    y_pred = K.flatten(y_pred)
    p = y_pred
    p_replicated = tf.squeeze(K.repeat(tf.expand_dims(p, axis=-1), num_gt_points))

    d_matrix = cdist(all_img_locations, gt_points)
    num_est_pts = tf.reduce_sum(p)

    term_1 = (1 / (num_est_pts + eps)) * K.sum(p * K.min(d_matrix, 1))

    d_div_p = K.min((d_matrix + eps) / (p_replicated**alpha + (eps / max_dist)), 0)
    d_div_p = K.clip(d_div_p, 0, max_dist)
    term_2 = K.mean(d_div_p, axis=0)

    return term_1 + term_2

def hausdorff_loss(y_true, y_pred):
    # map the per-sample distance over the batch, then average
    batched_losses = tf.map_fn(lambda x: weighted_hausdorff_distance(x[0], x[1]), (y_true, y_pred), dtype=tf.float32)
    return K.mean(batched_losses)
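
# Usage sketch (not part of the original paste): hausdorff_loss maps the
# per-sample distance over the batch, so it plugs into Keras like any loss.
# The model here is a hypothetical placeholder; it only needs to output a
# 256x256 single-channel probability map to match W and H above.
def build_example_model_256():
    inputs = tf.keras.Input(shape=(256, 256, 1))
    outputs = tf.keras.layers.Conv2D(1, 1, activation="sigmoid")(inputs)
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer="adam", loss=hausdorff_loss)
    return model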