#include "MLPLibrary.h"

// Store the network dimensions and learning rate; weights and biases
// are set up separately in initialize().
MLPLibrary::MLPLibrary(int inputSize, int hiddenSize, int outputSize, float learningRate) {
  numInputs = inputSize;
  numHidden = hiddenSize;
  numOutputs = outputSize;
  this->learningRate = learningRate;
}

// Seed every weight and bias with a random value in [-1.0, 1.0).
void MLPLibrary::initialize() {
  for (int i = 0; i < numInputs; i++) {
    for (int j = 0; j < numHidden; j++) {
      inputHiddenWeights[i][j] = random(-100, 100) / 100.0;
    }
  }

  for (int i = 0; i < numHidden; i++) {
    for (int j = 0; j < numOutputs; j++) {
      hiddenOutputWeights[i][j] = random(-100, 100) / 100.0;
    }
    hiddenLayerBiases[i] = random(-100, 100) / 100.0;
  }

  for (int i = 0; i < numOutputs; i++) {
    outputLayerBiases[i] = random(-100, 100) / 100.0;
  }
}

// One online-training step: forward pass, backpropagate the error terms,
// then apply a gradient-descent update to every weight and bias.
void MLPLibrary::train(float input[MAX_INPUT_SIZE], float target[MAX_OUTPUT_SIZE]) {
  // Forward pass: input -> hidden -> output.
  for (int i = 0; i < numInputs; i++) {
    inputLayer[i] = input[i];
  }

  for (int i = 0; i < numHidden; i++) {
    float sum = 0.0;
    for (int j = 0; j < numInputs; j++) {
      sum += inputLayer[j] * inputHiddenWeights[j][i];
    }
    hiddenLayer[i] = sigmoid(sum + hiddenLayerBiases[i]);
  }

  for (int i = 0; i < numOutputs; i++) {
    float sum = 0.0;
    for (int j = 0; j < numHidden; j++) {
      sum += hiddenLayer[j] * hiddenOutputWeights[j][i];
    }
    outputLayer[i] = sigmoid(sum + outputLayerBiases[i]);
  }

  // Error terms (deltas), using the sigmoid derivative y * (1 - y).
  for (int i = 0; i < numOutputs; i++) {
    outputLayerErrors[i] = (target[i] - outputLayer[i]) * outputLayer[i] * (1 - outputLayer[i]);
  }

  for (int i = 0; i < numHidden; i++) {
    float sum = 0.0;
    for (int j = 0; j < numOutputs; j++) {
      sum += outputLayerErrors[j] * hiddenOutputWeights[i][j];
    }
    hiddenLayerErrors[i] = sum * hiddenLayer[i] * (1 - hiddenLayer[i]);
  }

  // Gradient-descent updates for both weight layers and the biases.
  for (int i = 0; i < numHidden; i++) {
    for (int j = 0; j < numOutputs; j++) {
      hiddenOutputWeights[i][j] += learningRate * outputLayerErrors[j] * hiddenLayer[i];
    }
    hiddenLayerBiases[i] += learningRate * hiddenLayerErrors[i];
  }

  for (int i = 0; i < numOutputs; i++) {
    outputLayerBiases[i] += learningRate * outputLayerErrors[i];
  }

  for (int i = 0; i < numInputs; i++) {
    for (int j = 0; j < numHidden; j++) {
      inputHiddenWeights[i][j] += learningRate * hiddenLayerErrors[j] * inputLayer[i];
    }
  }
}

// Forward pass only: copy the input in, propagate through the hidden
// layer, and write the activations into the caller-supplied output array.
void MLPLibrary::predict(float input[MAX_INPUT_SIZE], float output[MAX_OUTPUT_SIZE]) {
  for (int i = 0; i < numInputs; i++) {
    inputLayer[i] = input[i];
  }

  for (int i = 0; i < numHidden; i++) {
    float sum = 0.0;
    for (int j = 0; j < numInputs; j++) {
      sum += inputLayer[j] * inputHiddenWeights[j][i];
    }
    hiddenLayer[i] = sigmoid(sum + hiddenLayerBiases[i]);
  }

  for (int i = 0; i < numOutputs; i++) {
    float sum = 0.0;
    for (int j = 0; j < numHidden; j++) {
      sum += hiddenLayer[j] * hiddenOutputWeights[j][i];
    }
    output[i] = sigmoid(sum + outputLayerBiases[i]);
  }
}

// Logistic sigmoid activation, mapping any real input into (0, 1).
float MLPLibrary::sigmoid(float x) {
  return 1.0 / (1.0 + exp(-x));
}
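
/*
  Example usage: a minimal XOR training sketch, kept inside a comment so
  this translation unit still compiles on its own. It assumes MLPLibrary.h
  defines MAX_INPUT_SIZE >= 2 and MAX_OUTPUT_SIZE >= 1; the constructor
  arguments (2 inputs, 4 hidden units, 1 output, learning rate 0.5) and
  the epoch count are illustrative choices, not values mandated by the
  library.

  #include "MLPLibrary.h"

  MLPLibrary net(2, 4, 1, 0.5);

  void setup() {
    Serial.begin(9600);
    randomSeed(analogRead(0));  // vary the initial random weights per run
    net.initialize();

    float inputs[4][MAX_INPUT_SIZE]   = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    float targets[4][MAX_OUTPUT_SIZE] = {{0}, {1}, {1}, {0}};

    // Online training: one train() call per pattern, repeated for many epochs.
    for (int epoch = 0; epoch < 5000; epoch++) {
      for (int p = 0; p < 4; p++) {
        net.train(inputs[p], targets[p]);
      }
    }

    float out[MAX_OUTPUT_SIZE];
    for (int p = 0; p < 4; p++) {
      net.predict(inputs[p], out);
      Serial.println(out[0]);  // should trend toward 0, 1, 1, 0
    }
  }

  void loop() {}
*/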