Gustav Louw 2018-03-28 20:41:08 -07:00
parent 756e383bb5
commit dc8b1fecb5
3 changed files with 158 additions and 101 deletions

Tinn.c (new file, 113 lines)

@@ -0,0 +1,113 @@
#include "Tinn.h"
#include <stdlib.h>
#include <math.h>
/* Returns the total squared error between the target T and the output layer. */
static double error(Tinn t, double* T)
{
    double error = 0.0;
    int i;
    for(i = 0; i < t.output; i++)
        error += 0.5 * pow(T[i] - t.O[i], 2.0);
    return error;
}
/* Propagates the output error back through the network, nudging every
 * weight against its gradient by the given learning rate. */
static void backpass(Tinn t, double* I, double* T, double rate)
{
    int i, j, k;
    /* Output layer weights are stored directly after the hidden layer weights. */
    double* X = t.W + t.hidden * t.inputs;
    /* Input-to-hidden weights: each hidden neuron first accumulates the
     * error it is responsible for across all outputs. */
    for(i = 0; i < t.hidden; i++)
    {
        double sum = 0.0;
        for(k = 0; k < t.output; k++)
        {
            double a = t.O[k] - T[k];
            double b = t.O[k] * (1 - t.O[k]);
            double c = X[k * t.hidden + i];
            sum += a * b * c;
        }
        for(j = 0; j < t.inputs; j++)
        {
            double a = sum;
            double b = t.H[i] * (1 - t.H[i]);
            double c = I[j];
            t.W[i * t.inputs + j] -= rate * a * b * c;
        }
    }
    /* Hidden-to-output weights: plain delta rule with the sigmoid derivative. */
    for(i = 0; i < t.output; i++)
        for(j = 0; j < t.hidden; j++)
        {
            double a = t.O[i] - T[i];
            double b = t.O[i] * (1 - t.O[i]);
            double c = t.H[j];
            X[t.hidden * i + j] -= rate * a * b * c;
        }
}
/* Sigmoid activation. */
static double act(double net)
{
    return 1.0 / (1.0 + exp(-net));
}
/* Runs one forward pass: fills the hidden layer H and the output layer O
 * from the input I using the sigmoid activation. */
static void forepass(Tinn t, double* I)
{
    int i, j;
    /* Fixed biases for the hidden and output layers. */
    const double B[] = { 0.35, 0.60 };
    /* Output layer weights are stored directly after the hidden layer weights. */
    double* X = t.W + t.hidden * t.inputs;
    for(i = 0; i < t.hidden; i++)
    {
        double sum = 0.0;
        for(j = 0; j < t.inputs; j++)
        {
            double a = I[j];
            double b = t.W[i * t.inputs + j];
            sum += a * b;
        }
        t.H[i] = act(sum + B[0]);
    }
    for(i = 0; i < t.output; i++)
    {
        double sum = 0.0;
        for(j = 0; j < t.hidden; j++)
        {
            double a = t.H[j];
            double b = X[i * t.hidden + j];
            sum += a * b;
        }
        t.O[i] = act(sum + B[1]);
    }
}
/* Trains the network for one step on a single input/target pair and
 * returns the resulting error. */
double ttrain(Tinn t, double* I, double* T, double rate)
{
    forepass(t, I);
    backpass(t, I, T, rate);
    return error(t, T);
}
/* Builds a new network. The first eight weights are hard coded, matching the
 * 2-2-2 network exercised by test2.c; the rest are zeroed by calloc. */
Tinn tnew(int inputs, int output, int hidden)
{
    Tinn t;
    t.inputs = inputs;
    t.output = output;
    t.hidden = hidden;
    t.H = (double*) calloc(hidden, sizeof(*t.H));
    t.O = (double*) calloc(output, sizeof(*t.O));
    t.W = (double*) calloc(hidden * (inputs + output), sizeof(*t.W));
    t.W[0] = 0.15;
    t.W[1] = 0.20;
    t.W[2] = 0.25;
    t.W[3] = 0.30;
    t.W[4] = 0.40;
    t.W[5] = 0.45;
    t.W[6] = 0.50;
    t.W[7] = 0.55;
    return t;
}
/* Frees the network's weights and layers. */
void tfree(Tinn t)
{
    free(t.W);
    free(t.H);
    free(t.O);
}
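For reference, the updates applied in backpass() are the standard backpropagation gradients for a single-hidden-layer sigmoid network under the squared error computed by error(). In the code's own symbols (W for the input-to-hidden weights, X for the hidden-to-output block, and I, H, O, T for the input, hidden, output, and target vectors), each weight is moved by -rate times its gradient:

E = \sum_k \tfrac{1}{2} (T_k - O_k)^2

\frac{\partial E}{\partial X_{ij}} = (O_i - T_i) \, O_i (1 - O_i) \, H_j

\frac{\partial E}{\partial W_{ij}} = \Big( \sum_k (O_k - T_k) \, O_k (1 - O_k) \, X_{ki} \Big) \, H_i (1 - H_i) \, I_j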

Tinn.h (new file, 26 lines)

@@ -0,0 +1,26 @@
#ifndef _TINN_H_
#define _TINN_H_
/*
 * TINN - The tiny, dependency-free, ANSI-C feedforward neural network
 * library with single-hidden-layer backpropagation support.
 */
typedef struct
{
    double* O; /* Output layer. */
    double* H; /* Hidden layer. */
    double* W; /* Weights: hidden layer weights followed by output layer weights. */
    int output; /* Number of output neurons. */
    int hidden; /* Number of hidden neurons. */
    int inputs; /* Number of input neurons. */
}
Tinn;
double ttrain(Tinn, double* I, double* T, double rate);
Tinn tnew(int inputs, int output, int hidden);
void tfree(Tinn);
#endif
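A minimal usage sketch of the three calls declared above, modelled on the updated test2.c below; the 2-2-2 shape, the 0.5 learning rate, and the 10000 iterations are copied from that test, not requirements of the API:

#include "Tinn.h"
#include <stdio.h>

int main(void)
{
    /* Same toy problem as test2.c: two inputs, two hidden neurons, two outputs. */
    double in[] = { 0.05, 0.10 };
    double tg[] = { 0.01, 0.99 };
    Tinn t = tnew(2, 2, 2);
    int i;
    for(i = 0; i < 10000; i++)
        printf("error: %0.13f\n", ttrain(t, in, tg, 0.5));
    tfree(t);
    return 0;
}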

test2.c (120 lines changed)

@@ -1,112 +1,30 @@
 #include <stdlib.h>
+#include "Tinn.h"
 #include <stdio.h>
-#include <math.h>
-static double act(double net)
-{
-    return 1.0 / (1.0 + exp(-net));
-}
-static void forepass(double* I, double* O, double* H, double* W, double* B, const int inputs, const int output, const int hidden)
-{
-    double* X = W + hidden * inputs;
-    for(int i = 0; i < hidden; i++) { for(int j = 0; j < inputs; j++) H[i] += I[j] * W[i * inputs + j]; H[i] = act(H[i] + B[0]); }
-    for(int i = 0; i < output; i++) { for(int j = 0; j < hidden; j++) O[i] += H[j] * X[i * hidden + j]; O[i] = act(O[i] + B[1]); }
-}
-static void backpass(double* I, double* O, double* H, double* W, double* T, const int inputs, const int output, const int hidden, const double rate)
-{
-    double* X = W + hidden * inputs;
-    for(int i = 0; i < output; i++)
-        for(int j = 0; j < hidden; j++)
-            X[2 * i + j] -= rate * ((O[i] - T[i]) * (O[i] * (1 - O[i])) * H[j]);
-    //W[4] -= rate * ((T[0] - O[0]) * (T[0] * (1 - T[0])) * H[0]);
-    //W[5] -= rate * ((T[0] - O[0]) * (T[0] * (1 - T[0])) * H[1]);
-    //W[6] -= rate * ((T[1] - O[1]) * (T[1] * (1 - T[1])) * H[0]);
-    //W[7] -= rate * ((T[1] - O[1]) * (T[1] * (1 - T[1])) * H[1]);
-}
-static double cerror(double *O, double* T, const int output)
-{
-    double error = 0.0;
-    for(int i = 0; i < output; i++)
-        error += 0.5 * pow(T[i] - O[i], 2.0);
-    return error;
-}
-static double* train(double* I, double* T, const int inputs, const int output, const int hidden)
-{
-    // Weights.
-    double* W = (double*) calloc(hidden * (inputs + output), sizeof(*W));
-    W[0] = 0.15;
-    W[1] = 0.20;
-    W[2] = 0.25;
-    W[3] = 0.30;
-    W[4] = 0.40;
-    W[5] = 0.45;
-    W[6] = 0.50;
-    W[7] = 0.55;
-    // Fixed at single hidden layer - only two biases are needed.
-    double B[] = { 0.35, 0.60 };
-    // Hidden layer.
-    double* H = (double*) calloc(hidden, sizeof(*H));
-    // Output layer. Will eventually converge to output with enough iterations.
-    double* O = (double*) calloc(output, sizeof(*O));
-    // Computes hidden and target nodes.
-    forepass(I, O, H, W, B, inputs, output, hidden);
-    // Computes output to target error.
-    double err = cerror(O, O, output);
-    printf("error: %f\n", err);
-    // Updates weights based on target error.
-    backpass(I, O, H, W, T, inputs, output, hidden, 0.5);
-    printf("W5: %f\n", W[4]);
-    printf("W6: %f\n", W[5]);
-    printf("W7: %f\n", W[6]);
-    printf("W8: %f\n", W[7]);
-    printf("%f\n", H[0]);
-    printf("%f\n", H[1]);
-    printf("%f\n", O[0]);
-    printf("%f\n", O[1]);
-    free(H);
-    return W;
-}
-double* predict(double* I, double* W, const int inputs, const int output)
-{
-    double* O = NULL;
-    // ...
-    return O;
-}
-#include <stdlib.h>
 int main()
 {
-    const int inputs = 2, output = 2, hidden = 2;
-    // Input.
+    int i;
+    int inputs = 2;
+    int output = 2;
+    int hidden = 2;
     double* I = (double*) calloc(inputs, sizeof(*I));
+    double* T = (double*) calloc(output, sizeof(*T));
+    Tinn tinn = tnew(inputs, output, hidden);
+    /* Input. */
     I[0] = 0.05;
     I[1] = 0.10;
-    // Target.
-    double* T = (double*) calloc(output, sizeof(*I));
+    /* Target. */
     T[0] = 0.01;
     T[1] = 0.99;
-    train(I, T, inputs, output, hidden);
+    for(i = 0; i < 10000; i++)
+    {
+        double error = ttrain(tinn, I, T, 0.5);
+        printf("error: %0.13f\n", error);
+    }
+    tfree(tinn);
     free(I);
     free(T);
     return 0;
 }
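The removed test2.c left its predict() stub unimplemented, and the new header exposes no inference-only call. A hypothetical helper along those lines is sketched below; the name tpredict, the repeated sigmoid, and the fixed biases copied from forepass() are assumptions for illustration, not part of this commit:

#include "Tinn.h"
#include <math.h>

/* Hypothetical inference-only helper (not in this commit): repeats the forward
 * pass from Tinn.c against the public Tinn fields and returns the output layer. */
double* tpredict(Tinn t, double* I)
{
    const double B[] = { 0.35, 0.60 }; /* Same fixed biases as forepass(). */
    double* X = t.W + t.hidden * t.inputs; /* Output weights follow hidden weights. */
    int i, j;
    for(i = 0; i < t.hidden; i++)
    {
        double sum = 0.0;
        for(j = 0; j < t.inputs; j++)
            sum += I[j] * t.W[i * t.inputs + j];
        t.H[i] = 1.0 / (1.0 + exp(-(sum + B[0])));
    }
    for(i = 0; i < t.output; i++)
    {
        double sum = 0.0;
        for(j = 0; j < t.hidden; j++)
            sum += t.H[j] * X[i * t.hidden + j];
        t.O[i] = 1.0 / (1.0 + exp(-(sum + B[1])));
    }
    return t.O;
}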