#Import plotter
import pylab
import numpy

from pybrain.datasets import ClassificationDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import LinearLayer, SigmoidLayer, SoftmaxLayer
from pybrain.structure import FullConnection, FeedForwardNetwork, BiasUnit

#Dataset filename
filename = 'irisN.dat'

#Output data (plot lists, rebuilt for each learning rate below)
TrainingPoints = []
TestPoints = []
xAxis = []

#Parameters
INPUT = 4               #Number of input dimensions
OUTPUT = 3              #Number of output classes
HIDDEN0 = 3             #Number of hidden neurons, 1st layer
HIDDEN1 = 2             #Number of hidden neurons, 2nd layer
LEARNING_RATE = 0.05    #Initial value; overwritten by the sweep over range(0, 101, 10) below
MOMENTUM = 0.0
IMPROVEMENT_EPOCHS = 3  #Stop training after this many consecutive epochs without improvement
WEIGHT_DECAY = 0.0
BIAS = True

#Data structures
AccuracyData = []

myDataset = ClassificationDataSet(INPUT, OUTPUT)
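#Note: the second argument to ClassificationDataSet sets the dimension of the
#target field, so each sample added below carries a 3-value one-of-many target.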

#Read the input text file and add each sample to the dataset
def readMyData(filename):
    global myDataset
    file = open(filename, 'r')
    for line in file.readlines():
        L = line.split(" ")
        inSample = []
        outSample = []
        for i in range(INPUT):
            inSample.append(float(L[i]))
        for j in range(OUTPUT):
            outSample.append(float(L[j+INPUT]))
        myDataset.addSample(inSample, outSample)
    file.close()
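
#Assumed format of irisN.dat (not verified here): each line holds INPUT feature
#values followed by OUTPUT target values, space-separated, e.g. a hypothetical
#normalised sample of class 1:
#   0.22 0.62 0.07 0.04 1 0 0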

#Current error measures - you could add your own
def SumSquareError(Actual, Desired):
    error = 0.
    for i in range(len(Desired)):
        for j in range(len(Desired[i])):
            error = error + (Actual[i][j] - Desired[i][j]) * (Actual[i][j] - Desired[i][j])
    return error

def MeanSquareError(Actual, Desired):
    error = 0.
    for i in range(len(Desired)):
        for j in range(len(Desired[i])):
            error = error + (Actual[i][j] - Desired[i][j]) * (Actual[i][j] - Desired[i][j])
    error = error / len(Desired)
    return error
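
#A vectorized equivalent (a sketch, not called anywhere below): assumes Actual
#and Desired are equal-shaped sequences of per-sample output vectors, as produced
#by network.activateOnDataset and the dataset's 'target' field.
def SumSquareErrorNumpy(Actual, Desired):
    diff = numpy.asarray(Actual) - numpy.asarray(Desired)
    return float(numpy.sum(diff * diff))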

#Load the float-format iris dataset once; calling readMyData inside the sweep
#below would keep appending duplicate samples to the global dataset.
readMyData(filename)

#Sweep the learning rate from 0.0 to 1.0 in steps of 0.1
for LEARNING_RATE in range(0, 101, 10):
    #Create baseline network
    network = FeedForwardNetwork()

    # Add a bias if desired
    #(note: the bias unit is added but never connected to any layer, so as
    #written it has no effect on the network's output)
    if BIAS:
        bias = BiasUnit()
        network.addModule(bias)

    #Build architecture
    inLayer = LinearLayer(INPUT)
    hiddenLayer0 = SigmoidLayer(HIDDEN0)
    hiddenLayer1 = SigmoidLayer(HIDDEN1)
    outLayer = SoftmaxLayer(OUTPUT)
    network.addInputModule(inLayer)
    network.addModule(hiddenLayer0)
    network.addModule(hiddenLayer1)
    network.addOutputModule(outLayer)

    #Make connections
    in_to_hidden = FullConnection(inLayer, hiddenLayer0)
    hidden_to_hidden = FullConnection(hiddenLayer0, hiddenLayer1)
    hidden_to_out = FullConnection(hiddenLayer1, outLayer)
    network.addConnection(in_to_hidden)
    network.addConnection(hidden_to_hidden)
    network.addConnection(hidden_to_out)

    #Initialize
    network.sortModules()

    #Split the data randomly into 90% training, 10% test
    testData, trainData = myDataset.splitWithProportion(0.1)

    #Create the backprop trainer; the loop value 0..100 maps to a learning rate of 0.0..1.0
    trainer = BackpropTrainer(network, dataset=trainData, learningrate=LEARNING_RATE/100.0, momentum=MOMENTUM, weightdecay=WEIGHT_DECAY)

    pylab.close()
    pylab.figure()

    #Reset per-run bookkeeping; the plot lists are cleared so each saved figure
    #shows only the curves for the current learning rate
    TrainingPoints = []
    TestPoints = []
    xAxis = []
    epochCounter = 0
    trnresultsum = 0
    tstresultsum = 0
    print "Learning rate: " + str(LEARNING_RATE)
    while epochCounter < IMPROVEMENT_EPOCHS:
        #for i in range(TrainingEpochs):
        trainer.trainEpochs(1)

        #Current errors (print lines commented out when not needed)
        trnresult = SumSquareError(network.activateOnDataset(dataset=trainData), trainData['target'])
        tstresult = SumSquareError(network.activateOnDataset(dataset=testData), testData['target'])
        #print "epoch: %4d" % trainer.totalepochs, \
        #    " train error: %5.2f" % trnresult, \
        #    " test error: %5.2f" % tstresult

        #Track the lowest errors seen so far; any improvement resets the patience counter
        if trainer.totalepochs == 1:
            lowTrnResult = trnresult
            lowTstResult = tstresult
        elif (trnresult < lowTrnResult) or (tstresult < lowTstResult):
            if trnresult < lowTrnResult:
                lowTrnResult = trnresult
            if tstresult < lowTstResult:
                lowTstResult = tstresult
            epochCounter = 0
        else:
            epochCounter += 1
        trnresultsum += trnresult
        tstresultsum += tstresult
        #print epochCounter

        #Build lists for plotting
        TrainingPoints.append(trnresult)
        TestPoints.append(tstresult)
        xAxis.append(trainer.totalepochs - 1)

    #Print results
    #AccuracyData row: [best test error, best train error, average train error, average test error]
    #(note: the averages divide by a fixed 20 rather than by the actual number of epochs)
    AccuracyData.append([lowTstResult, lowTrnResult, trnresultsum / 20, tstresultsum / 20])
    print "It took %4d" % trainer.totalepochs, "epochs to reach train error of %5.2f" % lowTrnResult, "and test error of %5.2f" % lowTstResult
    print "Average train error was " + str(trnresultsum / 20) + " and average test error was " + str(tstresultsum / 20)

    #Compare actual and desired test results (print loop commented out when not needed)
    actualTestOutput = network.activateOnDataset(dataset=testData)
    desiredTestOutput = testData['target']
    #print "Actual vs Desired Test Values"
    #for m in range(len(actualTestOutput)):
    #    print
    #    for n in range(len(actualTestOutput[m])):
    #        print "%2.1f" % float(actualTestOutput[m][n]), '\t',
    #        print desiredTestOutput[m][n]

    # ATTEMPT ONE
    # Create new figure.
    #fig = pylab.figure()
    # Create new axes
    #rect = [0.5, 0.5, 0.5, 0.5]
    #ax = fig.add_subplot(111)
    #ax.hold(True)
    #ax.set_anchor('SW')
    #ax.set_position([0, 0, 1, 1])
    # Plot data.
    #ax.plot(xAxis, TrainingPoints, 'b-')
    #ax.plot(xAxis, TestPoints, 'r-')
    #ax.set_title('Plot of Iris Training Errors')
    #ax.set_xlabel('Epochs')
    #ax.set_ylabel('Sum Squared Error')

    # EXAMPLE CODE I WAS PROVIDED WITH
    #pylab.figure()
    #pylab.plot(xAxis, TrainingPoints, 'b-')
    #pylab.plot(xAxis, TestPoints, 'r-')
    #pylab.xlabel('Epochs')
    #pylab.ylabel('Sum Squared Error')
    #pylab.title('Plot of Iris Training Errors')

    # LATEST ATTEMPT
    # Create new figure.
    fig = pylab.figure()
    # Clear figure.
    fig.clf()
    # Add new axis.
    ax = fig.add_subplot(1, 1, 1)
    # Remove any leftover lines (a no-op here, since the figure was just created
    # and cleared); iterate over a copy so removal does not skip entries.
    for line in list(ax.lines):
        ax.lines.remove(line)
    print ax.lines
    # Plot data.
    TrainLine, = ax.plot(xAxis, TrainingPoints, 'b-')
    TestLine, = ax.plot(xAxis, TestPoints, 'r-')
    ax.set_title('Plot of Iris Training Errors')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Sum Squared Error')

    # Save image, named after the learning-rate loop value.
    outfilename = str(int(LEARNING_RATE)) + ".png"
    print outfilename
    #pylab.ylim(ymin=0)
    pylab.savefig(outfilename)
    ax.lines.remove(TrainLine)
    ax.lines.remove(TestLine)
    #pylab.cla()
    #pylab.clf()
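
    # A simpler alternative (a sketch, left commented out): because the plot lists
    # are rebuilt for each learning rate, it is enough to draw a fresh figure per
    # pass, save it, and close it, with no manual line removal:
    #fig = pylab.figure()
    #ax = fig.add_subplot(1, 1, 1)
    #ax.plot(xAxis, TrainingPoints, 'b-')
    #ax.plot(xAxis, TestPoints, 'r-')
    #ax.set_title('Plot of Iris Training Errors')
    #ax.set_xlabel('Epochs')
    #ax.set_ylabel('Sum Squared Error')
    #fig.savefig(outfilename)
    #pylab.close(fig)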

#Summary of best and average errors for every learning rate tried
print AccuracyData