
# gz_nn_minibatch.py
# Pasted by a guest on Dec 30th, 2013 | syntax: Python | size: 1.65 KB | views: 660 | expires: Never
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from sklearn import datasets
import random
# Train a small feed-forward network on the iris data with mini-batch
# backpropagation, then report training-set accuracy.

# load the data from sklearn
iris = datasets.load_iris()
X = iris['data']      # feature matrix, 4 features per example
y = iris['target']    # integer class labels, 3 classes

# indices of all training examples; materialized as a list so it can be
# shuffled in place (a bare range object cannot be shuffled on Python 3)
all_inds = list(range(X.shape[0]))

# build the network: 4 input features, 10 hidden units, 3 output classes
fnn = buildNetwork(4, 10, 3, outclass=SoftmaxLayer, bias=True)
trainer = BackpropTrainer(fnn, momentum=0.1, verbose=True,
                          weightdecay=0.01, learningrate=0.01)

# repeat the mini-batch training for several epochs
for epoch in range(200):
    # get a random order for the training examples for batch gradient descent
    random.shuffle(all_inds)
    # split the indexes into lists with the indices for each batch of (up to) 10
    # (use a distinct variable name so the comprehension does not shadow the
    # epoch counter, as the original `i` did)
    batch_inds = [all_inds[start:start + 10]
                  for start in range(0, len(all_inds), 10)]
    # train on each batch
    for inds in batch_inds:
        # rebuild the dataset for the current batch
        ds = ClassificationDataSet(4, nb_classes=3)
        for x_i, y_i in zip(X[inds, :], y[inds]):
            ds.appendLinked(x_i, y_i)
        # one-hot encode the targets, as the softmax output layer expects
        ds._convertToOneOfMany()
        # train on the current batch
        trainer.trainOnDataset(ds)

# make a dataset with all the iris data
ds_all = ClassificationDataSet(4, nb_classes=3)
for x_i, y_i in zip(X, y):
    ds_all.appendLinked(x_i, y_i)
ds_all._convertToOneOfMany()

# test the result
# Note that we are testing on our training data, which is bad practice,
# but it does demonstrate the network is trained
print(sum(fnn.activateOnDataset(ds_all).argmax(axis=1) == y) / float(len(y)))