# Minimal custom first-order optimizer plugged into Optim.jl's internal interface.
module A

using Optim
using LinearAlgebra  # for norm
import Optim: FirstOrderOptimizer, initial_state, update_state!, trace!,
    assess_convergence, AbstractOptimizerState, update!, value, gradient

struct MinimalGradientDescent <: FirstOrderOptimizer
    η::Float64  # fixed step size (learning rate)
end

MinimalGradientDescent(; η = 1e-1) = MinimalGradientDescent(η)

# Mutable state carried between iterations by Optim's main loop
# (`type` is pre-1.0 syntax; `mutable struct` is the current equivalent).
mutable struct MinimalGradientDescentState{T,N} <: AbstractOptimizerState
    x::Array{T,N}
    x_previous::Array{T,N}
    f_x_previous::T
end

function initial_state(method::MinimalGradientDescent, options, d, initial_x)
    # prepare cache variables etc here; value_gradient!! (assumed available
    # via NLSolversBase, which Optim pulls in) evaluates f and ∇f at the
    # starting point so the first update_state! sees a valid gradient
    Optim.value_gradient!!(d, initial_x)
    # copies so the state does not alias the caller's array
    MinimalGradientDescentState(copy(initial_x), copy(initial_x), Inf)
end

function update_state!(d, state::MinimalGradientDescentState{T}, method::MinimalGradientDescent) where T
    # remember the current point and objective value so that
    # assess_convergence can compare against the previous iterate
    copyto!(state.x_previous, state.x)
    state.f_x_previous = value(d)

    # step along the negative gradient with fixed step size η
    state.x -= method.η * gradient(d)

    false # should the procedure force quit?
end

function trace!(tr, d, state, iteration, method::MinimalGradientDescent, options)
    dt = Dict()
    if options.extended_trace
        dt["x"] = copy(state.x)
        dt["g(x)"] = copy(gradient(d))
    end
    g_norm = norm(gradient(d), Inf)  # vecnorm was removed in Julia 1.0
    update!(tr,
            iteration,
            value(d),
            g_norm,
            dt,
            options.store_trace,
            options.show_trace,
            options.show_every,
            options.callback)
end

function assess_convergence(state::MinimalGradientDescentState, d, options)
    Optim.default_convergence_assessment(state, d, options)
end

end # module A

using Optim  # bring optimize and Optim.Options into the top-level scope as well

f = x -> sum(x.^2) + π
mfit = optimize(f, rand(2), A.MinimalGradientDescent(),
                Optim.Options(iterations = 500, store_trace = true, extended_trace = true))
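
# A quick sanity check on the result, sketched with Optim's documented result
# accessors (Optim.minimizer, Optim.minimum, Optim.iterations, Optim.f_trace,
# Optim.x_trace); for f(x) = sum(x.^2) + π the minimizer should approach the
# origin and the minimum should approach π.
println(Optim.minimizer(mfit))   # ≈ [0.0, 0.0]
println(Optim.minimum(mfit))     # ≈ π
println(Optim.iterations(mfit))  # iterations actually performed (≤ 500)

# store_trace/extended_trace make the history recorded by trace! available:
println(Optim.f_trace(mfit)[1:5])  # objective values over the first iterations
println(Optim.x_trace(mfit)[end])  # final iterate from the extended trace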