module mlp
  !Notes on the Fortran constraints that shaped this design:
  !Passing allocatable arrays is only allowed within a module.
  !Ditto for mixed-privacy components of derived types.
  !Allocatable components of derived types are a post-F90 feature
  !(TR 15581 / Fortran 2003).
  !Look up: passing non-allocatable derived types with allocatable components.
  !Also look up: passing allocatable derived types with allocatable components.
  !Only one part-reference is allowed to have nonzero rank when
  !referencing data, for simplicity.
  !Also, an array of pointers cannot be accessed in one statement.
  implicit none
  !These interfaces permit function overloading.
  interface set_activation
    module procedure pset_activation, ffset_activation
  end interface
  interface set_beta
    module procedure pset_beta, ffset_beta
  end interface
  !Constants representing the activation functions.
  integer, parameter :: threshold = 1, identity = 2, logistic = 3, hyperbolic = 4
  !Discriminant function for individual perceptrons.
  type, public :: discriminant
    double precision, pointer :: weights(:) !Allocate data for this
    double precision :: bias
    integer, private :: act_func_type = logistic
    double precision, private :: beta = 1.0D0
  end type discriminant
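  !A sketch of what activation() below computes from this type: a node's
  !response is y = f(dot_product(weights, x) + bias), with f selected by
  !act_func_type; e.g. for the logistic choice,
  !  y = 1/(1 + exp(-beta*(dot_product(weights, x) + bias)))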
  !Data structure for MLP nodes. Feedforward requires a doubly-linked
  !list for backpropagation, unless I'm creative with pointers.
  !Keep it simple for now. Id_no is for debugging.
  !fb_next and fb_prev are for possible expansion.
  type, public :: mlp_nodes
    type(discriminant), private :: disc_func
    type(mlp_nodes), private, pointer :: ff_next(:), ff_prev(:), fb_next(:), fb_prev(:)
    double precision, private :: output
    integer :: id_no
  end type mlp_nodes
  !Training parameters structure.
  type, public :: training_params
    integer :: v_checks = 6
    integer :: num_iterations = 1000
    double precision :: eta = 1.0D-2
    double precision :: alpha = 0.0D0
    double precision :: threshold = 1.0D-6
    !double precision :: mu
  end type training_params
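  !The defaults above can be overridden field by field before training;
  !a minimal sketch (variable names are illustrative):
  !  type(training_params) :: params
  !  params%eta = 1.0D-3
  !  call fftrain(net, inputs, validation, targets, params)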
  !Array of MLP layers.
  !This data structure makes it easier to address all nodes in a
  !single layer.
  type, public :: mlp_layers
    type(mlp_nodes), pointer :: curr_nodes(:)
  end type mlp_layers
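  !For example (a sketch), with type(mlp_layers) :: net(:) built by ffalloc,
  !net(i)%curr_nodes(j) is node j of layer i, and
  !net(i)%curr_nodes(:)%id_no collects the IDs of one whole layer.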
  !Begin function definitions.
contains
  !Perceptron activation function.
  double precision function activation(disc, features)
    double precision, intent(in) :: features(:)
    type(discriminant), intent(in) :: disc
    double precision :: disc_result
    disc_result = dot_product((/ disc%weights, disc%bias /), (/ features, 1.0D0 /))
    select case(disc%act_func_type)
    case(threshold)
      if(disc_result >= 0.0D0) then
        activation = 1.0D0
      else
        activation = 0.0D0
      end if
    case(identity)
      activation = disc_result
    case(logistic)
      activation = 1.0D0/(1.0D0 + exp(-disc%beta * disc_result))
    case(hyperbolic)
      activation = 2.0D0/(1.0D0 + exp(-disc%beta * disc_result)) - 1.0D0
    case default
      activation = 0.0D0
    end select
    return
  end function activation
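  !Sanity check, worked by hand: with the logistic choice and beta = 1,
  !disc_result = 0 gives activation = 0.5; the hyperbolic form
  !2/(1 + exp(-beta*a)) - 1 is algebraically equal to tanh(beta*a/2).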
  !Overloaded set routines for the private perceptron/MLP variables.
  subroutine pset_beta(disc, x)
    type(discriminant), intent(inout) :: disc
    double precision, intent(in) :: x
    disc%beta = x
  end subroutine
  subroutine pset_activation(disc, x)
    type(discriminant), intent(inout) :: disc
    integer, intent(in) :: x
    disc%act_func_type = x
  end subroutine
  subroutine ffset_beta(ffnet, x)
    type(mlp_layers), intent(inout) :: ffnet(:)
    double precision, intent(in) :: x
    integer :: i, j
    do i = 1, size(ffnet)
      do j = 1, size(ffnet(i)%curr_nodes)
        ffnet(i)%curr_nodes(j)%disc_func%beta = x
      end do
    end do
  end subroutine
  subroutine ffset_activation(ffnet, x)
    type(mlp_layers), intent(inout) :: ffnet(:)
    integer, intent(in) :: x
    integer :: i, j
    do i = 1, size(ffnet)
      do j = 1, size(ffnet(i)%curr_nodes)
        ffnet(i)%curr_nodes(j)%disc_func%act_func_type = x
      end do
    end do
  end subroutine
  !Feedforward routines.
  !Feedforward allocation:
  !Allocate space for a feedforward MLP. Nodes are NOT connected yet.
  subroutine ffalloc(layers, num_inputs, hl_array, num_outputs)
    integer, intent(in) :: num_inputs, hl_array(:), num_outputs
    type(mlp_layers), intent(out), allocatable :: layers(:)
    !Array bounds in Fortran can be remapped by pointing at specific index
    !ranges, which would make life slightly easier in the do loop below,
    !but that is not used here.
    !Note: initializing a local in its declaration would give it the SAVE
    !attribute, so k is set in an executable statement instead.
    integer :: num_hl, num_layers, i, j, k
    k = 1
    num_hl = size(hl_array)
    num_layers = num_hl + 2
    !Allocate space for layer information.
    allocate(layers(1:num_layers))
    !Then allocate space for each node (excluding weights).
    !First layer:
    allocate(layers(1)%curr_nodes(num_inputs))
    !Hidden layers:
    do i = 1, num_hl, 1
      allocate(layers(i + 1)%curr_nodes(hl_array(i)))
    end do
    !Output layer:
    allocate(layers(num_layers)%curr_nodes(num_outputs))
    !Then attach nodes to one another, and allocate space for weights.
    do i = 1, num_layers, 1
      !For each node in the current layer...
      do j = 1, size(layers(i)%curr_nodes), 1
        !print *, layers(i)%curr_nodes(j)%id_no
        layers(i)%curr_nodes(j)%id_no = k
        !Attach the nodes from the previous layer as inputs.
        if (i /= 1) then
          layers(i)%curr_nodes(j)%ff_prev => layers(i - 1)%curr_nodes
          !Weights are allocated here.
          allocate(layers(i)%curr_nodes(j)%disc_func%weights(size(layers(i - 1)%curr_nodes)))
        else
          nullify(layers(i)%curr_nodes(j)%ff_prev)
          !Weights for the first layer are still allocated, but not used
          !(there is logic to skip the dot product in the first layer).
          allocate(layers(i)%curr_nodes(j)%disc_func%weights(1))
        end if
        !Also connect the next layer of nodes to the current node's output.
        if (i /= num_layers) then
          layers(i)%curr_nodes(j)%ff_next => layers(i + 1)%curr_nodes
        else
          nullify(layers(i)%curr_nodes(j)%ff_next)
        end if
        !For future expansion.
        nullify(layers(i)%curr_nodes(j)%fb_next)
        nullify(layers(i)%curr_nodes(j)%fb_prev)
        k = k + 1
      end do
    end do
  end subroutine ffalloc
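  !A minimal usage sketch (array name illustrative): allocate a network
  !with 2 inputs, one hidden layer of 4 nodes, and 1 output, then free it:
  !  type(mlp_layers), allocatable :: net(:)
  !  call ffalloc(net, 2, (/ 4 /), 1)
  !  ...
  !  call ffdealloc(net)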
  !Deallocate memory from the neural network.
  subroutine ffdealloc(layers)
    type(mlp_layers), intent(inout), allocatable :: layers(:)
    integer :: num_layers, i, j, k
    num_layers = size(layers)
    do i = 1, num_layers, 1
      !For each node in the current layer...
      do j = 1, size(layers(i)%curr_nodes), 1
        !Deallocate its weights...
        if(associated(layers(i)%curr_nodes(j)%disc_func%weights)) then
          deallocate(layers(i)%curr_nodes(j)%disc_func%weights)
        end if
        !...and nullify its pointers.
        if(associated(layers(i)%curr_nodes(j)%ff_next)) then
          nullify(layers(i)%curr_nodes(j)%ff_next)
        end if
        if(associated(layers(i)%curr_nodes(j)%ff_prev)) then
          nullify(layers(i)%curr_nodes(j)%ff_prev)
        end if
        if(associated(layers(i)%curr_nodes(j)%fb_prev)) then
          nullify(layers(i)%curr_nodes(j)%fb_prev)
        end if
        if(associated(layers(i)%curr_nodes(j)%fb_next)) then
          nullify(layers(i)%curr_nodes(j)%fb_next)
        end if
        !The objects will be destroyed, so nullifying is probably not
        !necessary, but it is done as a precaution.
      end do
    end do
    !After all node pointers are released, deallocate the nodes in each layer.
    do k = 1, num_layers, 1
      if(associated(layers(k)%curr_nodes)) then
        deallocate(layers(k)%curr_nodes)
      end if
    end do
    !Finally, deallocate the layers themselves (an allocatable array, as
    !opposed to a pointer; note that allocated pointers are also associated,
    !and deallocated pointers are also nullified).
    if(allocated(layers)) then
      deallocate(layers)
    end if
  end subroutine ffdealloc
  !Train the feedforward neural network.
  !Currently a stub: it returns before any weights are updated.
  subroutine fftrain(ffnet, inputs, validation, targets, params)
    type(mlp_layers), intent(inout) :: ffnet(:)
    double precision, intent(in) :: inputs(:,:)
    double precision, intent(in) :: validation(:,:)
    double precision, intent(in) :: targets(:,:)
    type(training_params), intent(in) :: params
    !Row: layer number; column: vector component.
    double precision, pointer :: sensitivities(:,:)
    integer :: i, j, k, seed
    write(*,'((F8.5,1X))') inputs(2,:)
    call system_clock(seed)
    !Randomly initialize weights.
    do i = 1, size(ffnet), 1
    end do
    return
    !Batch gradient descent:
    !for the required number of iterations...
    !do i = 1, params%num_iterations
    !  do j = ...
    !  end do
    !end do
  end subroutine fftrain
  !Classify using the neural network.
  subroutine ffsim(ffnet, test_vector, output_vector)
    type(mlp_layers), intent(inout) :: ffnet(:)
    double precision, intent(in) :: test_vector(:)
    double precision, intent(out) :: output_vector(:)
    integer :: i, j, k, num_layers
    num_layers = size(ffnet)
    ffnet(1)%curr_nodes(:)%output = test_vector
    !print *, 'Input Values: ', ffnet(1)%curr_nodes(:)%output
    !Propagate the input to the output by traversing each layer,
    !using the outputs of the previous layer as inputs to the next.
    k = size(test_vector) + 1
    do i = 2, num_layers, 1
      do j = 1, size(ffnet(i)%curr_nodes), 1
        !print *, 'Input to node ', k, ': ', ffnet(i)%curr_nodes(j)%ff_prev(:)%output
        ffnet(i)%curr_nodes(j)%output = &
          activation(ffnet(i)%curr_nodes(j)%disc_func, &
          ffnet(i)%curr_nodes(j)%ff_prev(:)%output)
        k = k + 1
      end do
    end do
    output_vector = ffnet(num_layers)%curr_nodes(:)%output
  end subroutine ffsim
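  !Usage sketch (names illustrative), once weights are loaded or trained:
  !  double precision :: y(1)
  !  call ffsim(net, (/ 0.25D0, -0.75D0 /), y)
  !test_vector must match the input-layer size and output_vector the
  !output-layer size; no bounds checking is performed here.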
  !Load a previously created MLP and its weights from a file. For
  !simplicity, this reserves UNIT 11 for now.
  subroutine ffrestore(ffnet, filename)
    type(mlp_layers), allocatable, intent(out) :: ffnet(:)
    character(len=*), intent(in) :: filename
    !Version constant.
    character(len=*), parameter :: version = 'V0.1'
    integer, parameter :: unit_no = 11
    !Variables
    integer :: num_hl, num_layers, i, j, k, current_node, act_fun, &
      num_weights_curr_node
    integer, allocatable :: nodes_per_layer(:)
    character(len=128) :: buffer
    character(len=10) :: format_str
    double precision :: beta
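    !Expected file layout, as inferred from the reads below (each section
    !header after the first is preceded by one skipped record; field widths
    !follow the edit descriptors used):
    !  NNTXT V0.1
    !  <skipped record>
    !  NUM_HL: <I5>
    !  NUM_INPUTS: <I5>
    !  NUM_OUTPUTS: <I5>
    !  HL_VECTOR: <num_hl integers, I5 each>
    !  <skipped record>
    !  CONNECTIONS:  then one record per node in layers 1..num_layers-1 (ignored)
    !  <skipped record>
    !  ACTIVATION:   then, per non-input node: id, activation code, beta (D25.17)
    !  <skipped record>
    !  WEIGHTS:      then, per non-input node: id, its weights (D25.17 each)
    !  <skipped record>
    !  BIAS:         then, per non-input node: id, bias (D25.17)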
    open(unit = unit_no, file = filename)
    !Read the header.
    read(unit_no, '(A10)') buffer
    if(buffer(1:10) /= ('NNTXT ' // version)) then
      return
    end if
    !Read NUM_HL.
    read(unit_no, '(/A8,I5)') buffer, num_hl
    if(buffer(1:8) /= 'NUM_HL: ') then
      return
    end if
    num_layers = num_hl + 2
    allocate(nodes_per_layer(num_layers))
    !Read NUM_INPUTS.
    read(unit_no, '(A12,I5)') buffer, nodes_per_layer(1)
    if(buffer(1:12) /= 'NUM_INPUTS: ') then
      return
    end if
    !Read NUM_OUTPUTS.
    read(unit_no, '(A13,I5)') buffer, nodes_per_layer(num_layers)
    if(buffer(1:13) /= 'NUM_OUTPUTS: ') then
      return
    end if
    write(format_str, '(I5)') num_hl
    read(unit_no, '(A11,' // format_str // '(I5))') buffer, nodes_per_layer(2:(num_layers - 1))
    if(buffer(1:11) /= 'HL_VECTOR: ') then
      return
    end if
    !Now allocate space, since sufficient information is available.
    call ffalloc(ffnet, nodes_per_layer(1), &
      nodes_per_layer(2:(num_layers - 1)), nodes_per_layer(num_layers))
    !Connect/destroy connections to nodes (not necessary for an MLP).
    read(unit_no, '(/A12)') buffer
    if(buffer(1:12) /= 'CONNECTIONS:') then
      return
    end if
    do i = 1, sum(nodes_per_layer(1:(num_layers - 1))), 1
      read(unit_no, '()')
    end do
    !Assign activation functions for the nodes.
    read(unit_no, '(/A11)') buffer
    if(buffer(1:11) /= 'ACTIVATION:') then
      return
    end if
    k = 1 + nodes_per_layer(1)
    do i = 2, num_layers, 1
      do j = 1, size(ffnet(i)%curr_nodes), 1
        read(unit_no, '(I5,1X,I1,1X,D25.17)') current_node, act_fun, beta
        !print *, current_node, act_fun, beta
        if(current_node /= k) then
          return
        end if
        call set_activation(ffnet(i)%curr_nodes(j)%disc_func, act_fun)
        call set_beta(ffnet(i)%curr_nodes(j)%disc_func, beta)
        !print *, ffnet(i)%curr_nodes(j)%disc_func%act_func_type
        k = k + 1
      end do
    end do
    read(unit_no, '(/A9)') buffer
    if(buffer(1:9) /= 'WEIGHTS: ') then
      return
    end if
    !Assign weights.
    k = 1 + nodes_per_layer(1)
    do i = 2, num_layers, 1
      do j = 1, size(ffnet(i)%curr_nodes), 1
        num_weights_curr_node = size(ffnet(i)%curr_nodes(j)%ff_prev)
        !print *, k
        write(format_str, '(I5)') num_weights_curr_node
        read(unit_no, '(I5,' // format_str // '(D25.17,1X))') &
          current_node, ffnet(i)%curr_nodes(j)%disc_func%weights
        if(current_node /= k) then
          return
        end if
        k = k + 1
      end do
    end do
    read(unit_no, '(/A6)') buffer
    if(buffer(1:6) /= 'BIAS: ') then
      return
    end if
    !Assign biases.
    k = 1 + nodes_per_layer(1)
    do i = 2, num_layers, 1
      do j = 1, size(ffnet(i)%curr_nodes), 1
        !print *, k
        read(unit_no, '(I5,D25.17)') &
          current_node, ffnet(i)%curr_nodes(j)%disc_func%bias
        if(current_node /= k) then
          return
        end if
        k = k + 1
      end do
    end do
    close(unit_no)
  end subroutine ffrestore
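  !Usage sketch (filename illustrative):
  !  type(mlp_layers), allocatable :: net(:)
  !  call ffrestore(net, 'mynet.nntxt')
  !Note that on any format mismatch the routine simply returns early,
  !leaving the unit open and the network possibly half-initialized.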
  !Save a created MLP and its weights to a file for later use.
  !Currently an empty stub.
  subroutine ffsave(ffnet, filename)
    type(mlp_layers), allocatable, intent(in) :: ffnet(:)
    character(len=*), intent(in) :: filename
  end subroutine ffsave
  !Debug information routines.
  subroutine ffdebug_node(ffnet)
    type(mlp_layers), intent(in) :: ffnet(:)
    integer :: num_hl, num_layers, i, j
    num_layers = size(ffnet)
    num_hl = num_layers - 2
    print *, 'Debugging Information- Node Structure'
    print *, 'MLP Number Layers: ', num_layers
    do i = 1, size(ffnet), 1
      print *, 'Nodes in Layer ', i, ':', size(ffnet(i)%curr_nodes)
    end do
    print *, ''
    do j = 1, size(ffnet(1)%curr_nodes), 1
      print *, 'Input Layer, Node ', j, ' Assigned ID: ', ffnet(1)%curr_nodes(j)%id_no
      print *, 'Current node points to: ', ffnet(1)%curr_nodes(j)%ff_next(:)%id_no
      print *, ''
    end do
    do i = 1, num_hl, 1
      !For each node in the current hidden layer...
      do j = 1, size(ffnet(i + 1)%curr_nodes), 1
        print *, 'Layer ', i + 1, ', Node ', j, ' Assigned ID: ', ffnet(i + 1)%curr_nodes(j)%id_no
        print *, 'Current node points to: ', ffnet(i + 1)%curr_nodes(j)%ff_next(:)%id_no
        print *, 'Current node pointed to by: ', ffnet(i + 1)%curr_nodes(j)%ff_prev(:)%id_no
        print *, 'Weights: ', ffnet(i + 1)%curr_nodes(j)%disc_func%weights
        print *, 'Bias: ', ffnet(i + 1)%curr_nodes(j)%disc_func%bias
        print *, 'Activation: ', ffnet(i + 1)%curr_nodes(j)%disc_func%act_func_type
        print *, ''
      end do
    end do
    do j = 1, size(ffnet(num_layers)%curr_nodes), 1
      print *, 'Output Layer, Node ', j, ' Assigned ID: ', ffnet(num_layers)%curr_nodes(j)%id_no
      print *, 'Current node pointed to by: ', ffnet(num_layers)%curr_nodes(j)%ff_prev(:)%id_no
      print *, 'Weights: ', ffnet(num_layers)%curr_nodes(j)%disc_func%weights
      print *, 'Bias: ', ffnet(num_layers)%curr_nodes(j)%disc_func%bias
      print *, 'Activation: ', ffnet(num_layers)%curr_nodes(j)%disc_func%act_func_type
      print *, ''
    end do
  end subroutine ffdebug_node
end module mlp
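
!A minimal driver sketch, assuming the module above is compiled with it.
!The 2-3-1 shape, variable names, and input values are illustrative.
!ffalloc does not initialize weights, so the ffsim output below is
!arbitrary until weights are loaded (ffrestore) or trained.
program mlp_demo
  use mlp
  implicit none
  type(mlp_layers), allocatable :: net(:)
  double precision :: out_vec(1)
  !Build a network: 2 inputs, one hidden layer of 3 nodes, 1 output.
  call ffalloc(net, 2, (/ 3 /), 1)
  !The overloaded setters apply to every node when given the whole array.
  call set_activation(net, hyperbolic)
  call set_beta(net, 0.5D0)
  !Dump the node/pointer structure for inspection.
  call ffdebug_node(net)
  !Forward pass (weights are uninitialized here; see the note above).
  call ffsim(net, (/ 0.25D0, -0.75D0 /), out_vec)
  print *, 'Output: ', out_vec
  call ffdealloc(net)
end program mlp_demo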