subroutine mlrmodel(fvec,n,d,m,c,cgrad)
    !Cost and gradient for multinomial logistic regression with m-1
    !weight vectors (class m as the reference class). Assumes the host
    !module provides the training inputs lr_x (n x d), the labels lr_y
    !(values in 1..m), the penalty strength lr_lambda, and the helpers
    !compute_mlr_a and safe_log.
    implicit none
    integer, intent(in) :: n,d,m !number of features, training samples, and classes
    real(kind=8), dimension((m-1)*(n+1)), intent(in) :: fvec !fitting parameters
    real(kind=8), intent(out) :: c !cost
    real(kind=8), dimension((m-1)*(n+1)), intent(out) :: cgrad !gradient of cost
    integer :: i1,j1,i,j,k,y,bias_column
    real(kind=8), dimension(m-1,n) :: w !weight matrix
    real(kind=8), dimension(m-1) :: b !bias vector
    real(kind=8), dimension(n) :: x !one training input
    real(kind=8), dimension(m-1) :: a !class probabilities for classes 1..m-1
    bias_column = (m-1)*n !offset of the bias block within fvec and cgrad

    !unpack fitting parameters: column i1 of w occupies
    !fvec((i1-1)*(m-1)+1:i1*(m-1)), and the biases follow
    do i1=1,n
        j1 = (i1-1)*(m-1)+1
        w(:,i1) = fvec(j1:j1+m-2) !weight matrix
    end do
    b = fvec((m-1)*n+1:(m-1)*(n+1)) !bias vector

    !intent(out) arguments are undefined on entry, so zero the accumulators
    c = 0.d0
    cgrad = 0.d0
    !Add cost and gradient contributions from each training sample
    do k = 1, d
        !Fetch the training input and label
        x = lr_x(:,k)
        y = lr_y(k)
        a = compute_mlr_a(w,b,x,n,m)
        !Reference-class cost term (assuming a(1:m-1) are the class
        !probabilities and class m has probability 1-sum(a))
        if (y == m) c = c - safe_log(1.d0 - sum(a))
        !d(-log P(y|x))/dz(i) = a(i) - [y==i]; w(i,j) is stored at
        !fvec((j-1)*(m-1)+i), matching the unpacking above
        do i = 1, m - 1
            if (y == i) then
                !Cost log term
                c = c - safe_log(a(i))
                !Bias gradient, y == i
                cgrad(bias_column+i) = cgrad(bias_column+i) - (1.d0 - a(i))
                !Weight gradient, y == i
                do j = 1, n
                    cgrad((j-1)*(m-1)+i) = cgrad((j-1)*(m-1)+i) - (1.d0 - a(i))*x(j)
                end do
            else
                !Bias gradient, y /= i
                cgrad(bias_column+i) = cgrad(bias_column+i) + a(i)
                !Weight gradient, y /= i
                do j = 1, n
                    cgrad((j-1)*(m-1)+i) = cgrad((j-1)*(m-1)+i) + a(i)*x(j)
                end do
            end if
        end do
    end do
    !Add the l2 penalty, lr_lambda*sum(w**2), and its gradient 2*lr_lambda*w
    do i = 1, n
        do j = 1, m - 1
            cgrad((i-1)*(m-1)+j) = cgrad((i-1)*(m-1)+j) + 2.d0*lr_lambda*w(j,i)
            c = c + lr_lambda*w(j,i)*w(j,i)
        end do
    end do
end subroutine mlrmodel
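
!The subroutine calls two helpers that are not shown in the paste. Below
!is a minimal sketch of both, under the assumption that compute_mlr_a
!returns the softmax probabilities of classes 1..m-1 (class m is the
!reference class with logit 0) and that safe_log guards against log(0);
!subtracting the largest logit is the standard overflow guard.
function compute_mlr_a(w,b,x,n,m) result(a)
    implicit none
    integer, intent(in) :: n,m
    real(kind=8), dimension(m-1,n), intent(in) :: w
    real(kind=8), dimension(m-1), intent(in) :: b
    real(kind=8), dimension(n), intent(in) :: x
    real(kind=8), dimension(m-1) :: a
    real(kind=8), dimension(m-1) :: z
    real(kind=8) :: zmax
    z = matmul(w,x) + b !logits for classes 1..m-1; the reference logit is 0
    zmax = max(maxval(z),0.d0) !shift for numerical stability
    a = exp(z-zmax)
    a = a/(exp(-zmax) + sum(a)) !exp(-zmax) is the shifted reference term
end function compute_mlr_a

!A clamped logarithm so that a(i) == 0 cannot produce -infinity.
function safe_log(p) result(sl)
    implicit none
    real(kind=8), intent(in) :: p
    real(kind=8) :: sl
    sl = log(max(p,1.d-15))
end function safe_log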
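
!A quick way to validate cgrad is a central finite-difference check on
!each component of fvec. The driver below is a hypothetical sketch: the
!module name mlr_module and the data initialisation are placeholders,
!since the paste does not show the host module.
program check_mlr_grad
    !use mlr_module !hypothetical module holding mlrmodel, lr_x, lr_y, lr_lambda
    implicit none
    integer, parameter :: n=3, d=5, m=4
    real(kind=8), dimension((m-1)*(n+1)) :: fvec, cgrad, gscratch
    real(kind=8) :: c, cp, cm
    real(kind=8), parameter :: h = 1.d-6
    integer :: i

    call random_number(fvec)
    !... set lr_x, lr_y and lr_lambda here ...
    call mlrmodel(fvec,n,d,m,c,cgrad) !analytic gradient at fvec
    do i = 1, size(fvec)
        fvec(i) = fvec(i) + h
        call mlrmodel(fvec,n,d,m,cp,gscratch)
        fvec(i) = fvec(i) - 2.d0*h
        call mlrmodel(fvec,n,d,m,cm,gscratch)
        fvec(i) = fvec(i) + h !restore the perturbed component
        print *, i, cgrad(i), (cp-cm)/(2.d0*h) !analytic vs. central difference
    end do
end program check_mlr_grad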