Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- using DifferentialEquations, Flux, Optim, DiffEqFlux, Plots
"""
    lotka_volterra(du, u, p, t)

In-place RHS of the Lotka–Volterra predator–prey system.
`u = [x, y]` holds prey/predator populations; `p = [α, β, δ, γ]`
are the birth, predation, death, and reproduction rates.
"""
function lotka_volterra(du, u, p, t)
    x = u[1]
    y = u[2]
    α, β, δ, γ = p
    du[1] = α * x - β * x * y       # prey growth minus predation
    du[2] = -δ * y + γ * x * y      # predator decay plus feeding
end
# --- Generate synthetic "measurement" data from the true parameters ---
u0 = [1.0, 1.0]              # initial prey/predator populations
tspan = (0.0, 10.0)          # time horizon of the simulation
p = [1.5, 1.0, 3.0, 1.0]     # ground-truth α, β, δ, γ
prob = ODEProblem(lotka_volterra, u0, tspan, p)
sol = solve(prob, Tsit5(), saveat = 0.1)
using Plots                  # NOTE(review): already loaded at the top; harmless repeat
plot(sol)
# The sampled true trajectory is the target the optimizers will fit against.
measurements = Array(sol)
"""
    predict_adjoint(p)

Simulate the ODE with parameters `p` and return the trajectory as a matrix
sampled at `t = 0.0:0.1:10.0`. Uses DiffEqFlux's `concrete_solve` so the
solve is differentiable for gradient-based training.
"""
function predict_adjoint(p)
    solution = concrete_solve(prob, Tsit5(), u0, p, saveat = 0.0:0.1:10.0)
    return Array(solution)
end
"""
    loss_adjoint(p)

Sum-of-squared-errors between the trajectory simulated with `p` and the
recorded `measurements`. Returns `(loss, prediction)`, the tuple shape
`sciml_train` expects so the callback can receive the prediction too.
"""
function loss_adjoint(p)
    pred = predict_adjoint(p)
    sse = sum(abs2, pred - measurements)
    return sse, pred
end
"""
    negative_loss_adjoint(p)

NOTE(review): despite the name, the original body was byte-identical to
`loss_adjoint` — no negation was applied. The duplication is removed by
delegating, which preserves the original behavior exactly. Confirm whether
`(-loss, prediction)` was actually intended before renaming or negating.
"""
function negative_loss_adjoint(p)
    return loss_adjoint(p)
end
# Training callback: prints the current loss and overlays the model
# trajectory for the candidate parameters `p` on top of the recorded data.
cb = function (p, l, pred)
    display(l)
    # `remake` rebuilds `prob` with the current parameters `p`.
    fig = plot(solve(remake(prob, p = p), Tsit5()), ylim = (0, 6))
    plot!(fig, (0.0:0.1:10.0, measurements[1, :]), label = "rabbit data")
    display(plot!(fig, (0.0:0.1:10.0, measurements[2, :]), label = "wolves data"))
    # Returning `false` lets optimization continue; `true` would halt it.
    return false
end
# Starting point for the parameter fit (deliberately off the true values).
guess = [1.15814, 1.898547, 3.26339, 0.443906]
# Show the ODE solution at the initial parameter values before training.
cb(guess, loss_adjoint(guess)...)
# Global pre-scan with particle swarm optimization.
# FIX: the optimizer constructor is `Optim.ParticleSwarm()`;
# `Optim.ParticleSwarmState` is Optim's internal per-iteration state type
# and is not a valid optimizer argument for `sciml_train`.
prescan = DiffEqFlux.sciml_train(loss_adjoint, guess, Optim.ParticleSwarm(),
                                 cb = cb, maxiters = 1000)
refined_guess = prescan.minimizer
# Show the fit after the global pre-scan.
cb(refined_guess, loss_adjoint(refined_guess)...)
# Local gradient-based refinement with ADAM, starting from the swarm result.
res = DiffEqFlux.sciml_train(negative_loss_adjoint, refined_guess, ADAM(0.1),
                             cb = cb, maxiters = 1000)
# Show the final fit and plot the fitted trajectory on the data grid.
cb(res.minimizer, loss_adjoint(res.minimizer)...)
plot(solve(remake(prob, p = res.minimizer), Tsit5(), saveat = 0.0:0.1:10.0),
     ylim = (0, 6))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement