#ifndef EUCLIDEAN_LOST_HPP
#define EUCLIDEAN_LOST_HPP

#include <dlib/dnn.h>

namespace dlib{

class loss_euclidean_dist_
{
public:

    typedef matrix<float> training_label_type;
    typedef matrix<float> output_label_type;

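    // to_label() converts the raw network output into user facing labels:
    // one matrix<float> (a predicted residual image) per input sample.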
    template <
            typename SUB_TYPE,
            typename label_iterator
            >
    void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
            ) const
    {
        DLIB_CASSERT(input_tensor.num_samples() != 0);

        // The predictions are the output of the subnet itself, so there is
        // no need to walk the layer stack with layer<N>(sub); indexing
        // layer<sub.num_layers-1> would fetch the bottom (input) layer
        // instead of the network's prediction.
        const tensor& output_tensor = sub.get_output();
        DLIB_CASSERT(output_tensor.num_samples() == input_tensor.num_samples());

        // Write one predicted plane per sample into the label iterator.
        for (long i = 0; i < output_tensor.num_samples(); ++i)
        {
            *iter++ = image_plane(output_tensor, i);
        }
    }

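    // Computes the batch averaged Euclidean (mean squared) loss
    //     loss = (1 / (2*N)) * sum_i || truth_i - predict_i ||^2
    // and writes dloss/dpredict_i = (predict_i - truth_i) / N into the
    // gradient tensor, one plane per sample, following the contract dlib
    // documents for loss layers in loss_abstract.h.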
    template <
            typename const_label_iterator,
            typename SUBNET
            >
    double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
            ) const
    {
        DLIB_CASSERT(input_tensor.num_samples() != 0);

        auto const &output_tensor = sub.get_output();
        tensor& grad = sub.get_gradient_input();
        DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
        DLIB_CASSERT(grad.nc() == input_tensor.nc() && grad.nr() == input_tensor.nr());
        DLIB_CASSERT(input_tensor.nr() == output_tensor.nr() &&
                     input_tensor.nc() == output_tensor.nc() &&
                     input_tensor.k() == output_tensor.k());

        // This loss treats each sample as a single channel image plane.
        DLIB_CASSERT(output_tensor.k() == 1);

        double loss = 0;
        const float scale = 1.0f / output_tensor.num_samples();
        float *grad_ptr = grad.host_write_only();
        for (long i = 0; i < output_tensor.num_samples(); ++i)
        {
            const matrix<float> predict_residual = image_plane(output_tensor, i);
            DLIB_CASSERT(predict_residual.nr() == truth->nr() &&
                         predict_residual.nc() == truth->nc());
            // The label (truth) is defined as the residual image:
            // truth == high_res_img - low_res_img.
            const matrix<float> error_residual = predict_residual - *truth;
            loss += sum(pointwise_multiply(error_residual, error_residual));
            // The derivative of (1/2)*(x - t)^2 is (x - t), i.e. predict - truth,
            // not truth - predict. Each sample gets its own gradient plane,
            // scaled by 1/N so it matches the batch averaged loss returned below.
            const matrix<float> sample_grad = scale*error_residual;
            std::copy(&sample_grad(0,0), &sample_grad(0,0) + sample_grad.size(),
                      grad_ptr + i*sample_grad.size());
            ++truth;
        }

        return loss / 2.0 / output_tensor.num_samples();
    }

    friend void serialize(const loss_euclidean_dist_& , std::ostream& out)
    {
        serialize("loss_euclidean_dist_", out);
    }

    friend void deserialize(loss_euclidean_dist_& , std::istream& in)
    {
        std::string version;
        deserialize(version, in);
        if (version != "loss_euclidean_dist_")
            throw serialization_error("Unexpected version found while deserializing dlib::loss_euclidean_dist_.");
    }

    friend std::ostream& operator<<(std::ostream& out, const loss_euclidean_dist_& )
    {
        out << "loss_euclidean_dist_";
        return out;
    }

    friend void to_xml(const loss_euclidean_dist_&, std::ostream& out)
    {
        out << "<loss_euclidean_dist_/>";
    }

};

template <typename SUBNET>
using loss_euclidean_dist = add_loss_layer<loss_euclidean_dist_, SUBNET>;
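// A minimal usage sketch. The architecture below is an illustrative
// assumption (any dlib net whose final layer emits one plane per sample
// would work), not something this header prescribes:
//
//     using net_type = loss_euclidean_dist<
//                          con<1,3,3,1,1,
//                          relu<con<64,3,3,1,1,
//                          input<matrix<float>>>>>>;
//
//     net_type net;
//     std::vector<matrix<float>> inputs;  // e.g. low resolution images
//     std::vector<matrix<float>> labels;  // residuals: high_res - low_res
//     dnn_trainer<net_type> trainer(net);
//     trainer.train(inputs, labels);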

}   // namespace dlib

#endif // EUCLIDEAN_LOST_HPP