2018-03-29 22:33:19 +03:00
|
|
|
![](img/logo.PNG)
|
2018-03-27 05:09:24 +03:00
|
|
|
|
2018-03-31 02:52:20 +03:00
|
|
|
Tinn (Tiny Neural Network) is a 200-line, dependency-free neural network library written in C99.
|
2018-03-27 05:09:24 +03:00
|
|
|
|
2018-03-30 00:32:11 +03:00
|
|
|
#include "Tinn.h"

#include <stdio.h>

// Number of training sets (rows of the XOR truth table).
#define SETS 4
// Number of inputs per set.
#define NIPS 2
// Number of hidden-layer neurons.
#define NHID 8
// Number of outputs per set.
#define NOPS 1
// Number of training iterations (full passes over the data).
#define ITER 2000
// Learning rate.
#define RATE 1.0f

// Trains a tiny neural network to learn XOR, printing the average
// training error per iteration, then the target vs. predicted value
// for each input pair. Returns 0 on success.
int main(void)
{
    // XOR inputs.
    float in[SETS][NIPS] = {
        { 0, 0 },
        { 0, 1 },
        { 1, 0 },
        { 1, 1 },
    };
    // XOR targets: output is 1 when exactly one input is 1.
    float tg[SETS][NOPS] = {
        { 0 },
        { 1 },
        { 1 },
        { 0 },
    };
    // Build.
    const Tinn tinn = xtbuild(NIPS, NHID, NOPS);
    // Train.
    for(int i = 0; i < ITER; i++)
    {
        float error = 0.0f;
        for(int j = 0; j < SETS; j++)
            error += xttrain(tinn, in[j], tg[j], RATE);
        // Cast matches the %f conversion explicitly (floats are
        // promoted to double in variadic calls anyway).
        printf("%.12f\n", (double) (error / SETS));
    }
    // Predict.
    for(int i = 0; i < SETS; i++)
    {
        const float* pd = xtpredict(tinn, in[i]);
        printf("%f :: %f\n", (double) tg[i][0], (double) pd[0]);
    }
    // Cleanup.
    xtfree(tinn);
    return 0;
}
|
|
|
|
|
2018-03-30 23:12:51 +03:00
|
|
|
For a quick demo, get some training data:
|
2018-03-30 23:10:23 +03:00
|
|
|
|
|
|
|
wget http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data
|
|
|
|
|
2018-03-30 23:12:51 +03:00
|
|
|
And if you're on Linux or macOS, just build and run:
|
2018-03-27 05:09:24 +03:00
|
|
|
|
2018-03-29 08:08:19 +03:00
|
|
|
make; ./tinn
|
2018-03-30 23:12:51 +03:00
|
|
|
|
|
|
|
If you're on Windows it's:
|
|
|
|
|
|
|
|
mingw32-make & tinn.exe
|
2018-04-11 04:37:19 +03:00
|
|
|
|
|
|
|
The training data consists of handwritten digits, written both slowly and quickly.
|
|
|
|
Each line in the data set corresponds to one handwritten digit. Each digit is 16x16 pixels in size
|
|
|
|
giving 256 inputs to the neural network.
|
|
|
|
|
|
|
|
At the end of each line, 10 binary flags indicate which digit was written:
|
|
|
|
|
|
|
|
0: 1 0 0 0 0 0 0 0 0 0
|
|
|
|
1: 0 1 0 0 0 0 0 0 0 0
|
|
|
|
2: 0 0 1 0 0 0 0 0 0 0
|
|
|
|
3: 0 0 0 1 0 0 0 0 0 0
|
|
|
|
4: 0 0 0 0 1 0 0 0 0 0
|
|
|
|
...
|
|
|
|
9: 0 0 0 0 0 0 0 0 0 1
|
|
|
|
|
|
|
|
This gives 10 outputs to the neural network. The test program will output the
|
|
|
|
accuracy for each digit. Expect above 99% accuracy for the correct digit, and
|
|
|
|
less than 1% accuracy for the other digits.
|