Gustav Louw 2018-03-28 20:55:49 -07:00
parent dc8b1fecb5
commit 3a5346e72a
5 changed files with 32 additions and 822 deletions

Genann.h

@@ -1,331 +0,0 @@
//
// GENANN - Minimal C Artificial Neural Network
//
// Copyright (c) 2015, 2016 Lewis Van Winkle
//
// http://CodePlea.com
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgement in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
//
// This software has been altered from its original state: namely, whitespace
// and formatting edits, but most importantly the library has been moved into
// a single static inline header file.
//
// - Gustav Louw 2018
#pragma once
#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef double (*genann_actfun)(double a);
typedef struct
{
// How many inputs, outputs, and hidden neurons.
int inputs;
int hidden_layers;
int hidden;
int outputs;
// Which activation function to use for hidden neurons. Default: genann_act_sigmoid_cached.
genann_actfun activation_hidden;
// Which activation function to use for output. Default: genann_act_sigmoid_cached.
genann_actfun activation_output;
// Total number of weights, and size of weights buffer.
int total_weights;
// Total number of neurons + inputs and size of output buffer.
int total_neurons;
// All weights (total_weights long).
double *weight;
// Stores input array and output of each neuron (total_neurons long).
double *output;
// Stores delta of each hidden and output neuron (total_neurons - inputs long).
double *delta;
}
Genann;
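// Note: weight, output, and delta are not separate allocations; genann_init
// below carves all three out of a single malloc laid out as
// [Genann][total_weights doubles][total_neurons doubles][total_neurons - inputs doubles].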
static inline double genann_act_sigmoid(double a)
{
return a < -45.0 ? 0 : a > 45.0 ? 1.0 : 1.0 / (1 + exp(-a));
}
static inline double genann_act_sigmoid_cached(double a)
{
// If you're optimizing for memory usage, just
// delete this entire function and replace references
// to genann_act_sigmoid_cached with genann_act_sigmoid.
const double min = -15.0;
const double max = +15.0;
static double interval;
static int initialized = 0;
static double lookup[4096];
const int lookup_size = sizeof(lookup) / sizeof(*lookup);
// Calculate entire lookup table on first run.
if(!initialized)
{
interval = (max - min) / lookup_size;
for(int i = 0; i < lookup_size; ++i)
lookup[i] = genann_act_sigmoid(min + interval * i);
// The flag is set last so a concurrent caller is less likely to observe a half-built table.
initialized = 1;
}
const int i = (int) ((a - min) / interval + 0.5);
return i <= 0 ? lookup[0] : i >= lookup_size ? lookup[lookup_size - 1] : lookup[i];
}
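// Worked example: the table spans [-15, +15] in 4096 steps, so interval is
// 30.0 / 4096 ~ 0.00732; a = 0.0 maps to i = (int) ((0.0 + 15.0) / interval + 0.5)
// = 2048, and lookup[2048] = genann_act_sigmoid(-15.0 + interval * 2048)
// = genann_act_sigmoid(0.0) = 0.5, as expected.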
static inline double genann_act_threshold(double a)
{
return a > 0;
}
static inline double genann_act_linear(double a)
{
return a;
}
// We use the following for uniform random numbers between 0 and 1.
// If you have a better generator, replace this function.
static inline double genann_random()
{
return (double) rand() / RAND_MAX;
}
static inline void genann_randomize(Genann *ann)
{
for(int i = 0; i < ann->total_weights; ++i)
{
double r = genann_random();
// Sets weights from -0.5 to 0.5.
ann->weight[i] = r - 0.5;
}
}
static inline Genann *genann_init(int inputs, int hidden_layers, int hidden, int outputs)
{
if(hidden_layers < 0)
return 0;
if(inputs < 1)
return 0;
if(outputs < 1)
return 0;
if(hidden_layers > 0 && hidden < 1)
return 0;
const int hidden_weights = hidden_layers ? (inputs + 1) * hidden + (hidden_layers - 1) * (hidden + 1) * hidden : 0;
const int output_weights = (hidden_layers ? (hidden + 1) : (inputs + 1)) * outputs;
const int total_weights = hidden_weights + output_weights;
const int total_neurons = inputs + hidden * hidden_layers + outputs;
// Allocate extra size for weights, outputs, and deltas.
const int size = sizeof(Genann) + sizeof(double) * (total_weights + total_neurons + (total_neurons - inputs));
Genann* ret = (Genann*) malloc(size);
if(!ret)
return 0;
ret->inputs = inputs;
ret->hidden_layers = hidden_layers;
ret->hidden = hidden;
ret->outputs = outputs;
ret->total_weights = total_weights;
ret->total_neurons = total_neurons;
// Set pointers.
ret->weight = (double*) ((char*) ret + sizeof(Genann));
ret->output = ret->weight + ret->total_weights;
ret->delta = ret->output + ret->total_neurons;
genann_randomize(ret);
ret->activation_hidden = genann_act_sigmoid_cached;
ret->activation_output = genann_act_sigmoid_cached;
return ret;
}
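// Example sizing: genann_init(3, 2, 4, 2) yields hidden_weights =
// (3 + 1) * 4 + (2 - 1) * (4 + 1) * 4 = 36 and output_weights = (4 + 1) * 2 = 10,
// so total_weights = 46; total_neurons = 3 + 2 * 4 + 2 = 13; the single malloc
// above then spans sizeof(Genann) plus 46 + 13 + 10 doubles, the final 10
// (total_neurons - inputs) being the deltas.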
static inline void genann_free(Genann *ann)
{
// The weight, output, and delta pointers all point into the same single buffer.
free(ann);
}
static inline Genann *genann_copy(Genann const *ann)
{
const int size = sizeof(Genann) + sizeof(double) * (ann->total_weights + ann->total_neurons + (ann->total_neurons - ann->inputs));
Genann* ret = (Genann*) malloc(size);
if(!ret)
return 0;
memcpy(ret, ann, size);
// Set pointers.
ret->weight = (double*)((char*)ret + sizeof(Genann));
ret->output = ret->weight + ret->total_weights;
ret->delta = ret->output + ret->total_neurons;
return ret;
}
static inline double const *genann_run(Genann const *ann, double const *inputs)
{
double const *w = ann->weight;
double *o = ann->output + ann->inputs;
double const *i = ann->output;
// Copy the inputs to the scratch area, where we also store each neuron's
// output, for consistency. This way the first layer isn't a special case.
memcpy(ann->output, inputs, sizeof(double) * ann->inputs);
const genann_actfun act = ann->activation_hidden;
const genann_actfun acto = ann->activation_output;
// Figure hidden layers, if any.
for(int h = 0; h < ann->hidden_layers; ++h)
{
for(int j = 0; j < ann->hidden; ++j)
{
double sum = *w++ * -1.0;
for(int k = 0; k < (h == 0 ? ann->inputs : ann->hidden); ++k)
sum += *w++ * i[k];
*o++ = act(sum);
}
i += (h == 0 ? ann->inputs : ann->hidden);
}
double const *ret = o;
// Figure output layer.
for(int j = 0; j < ann->outputs; ++j)
{
double sum = *w++ * -1.0;
for(int k = 0; k < (ann->hidden_layers ? ann->hidden : ann->inputs); ++k)
sum += *w++ * i[k];
*o++ = acto(sum);
}
// Sanity check that we used all weights and wrote all outputs.
assert(w - ann->weight == ann->total_weights);
assert(o - ann->output == ann->total_neurons);
return ret;
}
static inline void genann_train(Genann const *ann, double const *inputs, double const *desired_outputs, double learning_rate)
{
// To begin with, we must run the network forward.
genann_run(ann, inputs);
// First set the output layer deltas.
{
// First output.
double const *o = ann->output + ann->inputs + ann->hidden * ann->hidden_layers;
// First delta.
double *d = ann->delta + ann->hidden * ann->hidden_layers;
// First desired output.
double const *t = desired_outputs;
// Set output layer deltas.
if(ann->activation_output == genann_act_linear)
for(int j = 0; j < ann->outputs; ++j)
*d++ = *t++ - *o++;
else
for(int j = 0; j < ann->outputs; ++j)
{
*d++ = (*t - *o) * *o * (1.0 - *o);
++o; ++t;
}
}
// Set hidden layer deltas, start on last layer and work backwards.
// Note that loop is skipped in the case of hidden_layers == 0.
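// For sigmoid hidden units the chain rule gives
// delta_j = o_j * (1 - o_j) * sum over k of (w_jk * delta_k),
// with k running over the following layer; the loops below compute exactly this.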
for(int h = ann->hidden_layers - 1; h >= 0; --h)
{
// Find first output and delta in this layer.
double const *o = ann->output + ann->inputs + (h * ann->hidden);
double *d = ann->delta + (h * ann->hidden);
// Find first delta in following layer (which may be hidden or output).
double const * const dd = ann->delta + ((h + 1) * ann->hidden);
// Find first weight in following layer (which may be hidden or output).
double const * const ww = ann->weight + ((ann->inputs + 1) * ann->hidden) + ((ann->hidden+1) * ann->hidden * (h));
for(int j = 0; j < ann->hidden; ++j)
{
double delta = 0;
for(int k = 0; k < (h == ann->hidden_layers - 1 ? ann->outputs : ann->hidden); ++k)
{
const double forward_delta = dd[k];
const int windex = k * (ann->hidden + 1) + (j + 1);
const double forward_weight = ww[windex];
delta += forward_delta * forward_weight;
}
*d = *o * (1.0-*o) * delta;
++d;
++o;
}
}
// Train the outputs.
{
// Find first output delta.
const double * d = ann->delta + ann->hidden * ann->hidden_layers;
// Find first weight to first output delta.
double* w = ann->weight + (ann->hidden_layers ? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * ann->hidden * (ann->hidden_layers - 1)) : 0);
// Find first output in previous layer.
const double* const i = ann->output + (ann->hidden_layers ? (ann->inputs + ann->hidden * (ann->hidden_layers - 1)) : 0);
// Set output layer weights.
for(int j = 0; j < ann->outputs; ++j)
{
for(int k = 0; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; ++k)
*w++ += (k == 0) ? (*d * learning_rate * -1.0) : (*d * learning_rate * i[k-1]);
++d;
}
assert(w - ann->weight == ann->total_weights);
}
// Train the hidden layers.
for(int h = ann->hidden_layers - 1; h >= 0; --h)
{
// Find first delta in this layer.
const double* d = ann->delta + (h * ann->hidden);
// Find first input to this layer.
const double* i = ann->output + (h ? (ann->inputs + ann->hidden * (h - 1)) : 0);
// Find first weight to this layer.
double* w = ann->weight + (h ? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * (ann->hidden) * (h - 1)) : 0);
for(int j = 0; j < ann->hidden; ++j)
{
for(int k = 0; k < (h == 0 ? ann->inputs : ann->hidden) + 1; ++k)
*w++ += (k == 0) ? (*d * learning_rate * -1.0) : (*d * learning_rate * i[k - 1]);
++d;
}
}
}
static inline void genann_write(Genann const *ann, FILE *out)
{
fprintf(out, "%d %d %d %d", ann->inputs, ann->hidden_layers, ann->hidden, ann->outputs);
for(int i = 0; i < ann->total_weights; ++i)
fprintf(out, " %.20e", ann->weight[i]);
}
static inline Genann *genann_read(FILE *in)
{
int inputs;
int hidden_layers;
int hidden;
int outputs;
errno = 0;
int rc = fscanf(in, "%d %d %d %d", &inputs, &hidden_layers, &hidden, &outputs);
if(rc < 4 || errno != 0)
{
perror("fscanf");
return NULL;
}
Genann *ann = genann_init(inputs, hidden_layers, hidden, outputs);
if(!ann)
    return NULL;
for(int i = 0; i < ann->total_weights; ++i)
{
errno = 0;
rc = fscanf(in, " %le", ann->weight + i);
if(rc < 1 || errno != 0)
{
perror("fscanf");
genann_free(ann);
return NULL;
}
}
return ann;
}
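For reference, a minimal sketch of how the functions above fit together, assuming XOR as illustrative data (the 2-2-1 shape, epoch count, and 3.0 learning rate are arbitrary picks here, and since the weights start random with rand() unseeded, a given run may need more epochs to converge):
#include "Genann.h"
int main(void)
{
    // Hypothetical XOR training data for illustration.
    const double in[4][2] = { {0, 0}, {0, 1}, {1, 0}, {1, 1} };
    const double out[4] = { 0, 1, 1, 0 };
    // 2 inputs, 1 hidden layer of 2 neurons, 1 output.
    Genann *ann = genann_init(2, 1, 2, 1);
    if(!ann)
        return 1;
    for(int i = 0; i < 10000; ++i)
        for(int j = 0; j < 4; ++j)
            genann_train(ann, in[j], &out[j], 3.0);
    // Round-trip the weights through genann_write / genann_read.
    FILE *f = tmpfile();
    if(!f)
        return 1;
    genann_write(ann, f);
    rewind(f);
    Genann *copy = genann_read(f);
    fclose(f);
    if(!copy)
    {
        genann_free(ann);
        return 1;
    }
    printf("XOR(1, 0) ~ %f\n", *genann_run(copy, in[2]));
    genann_free(ann);
    genann_free(copy);
    return 0;
}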

Makefile

@@ -3,7 +3,8 @@ CC = gcc
NAME = shaper
SRCS =
SRCS+= main.c
SRCS += main.c
SRCS += Tinn.c
# ComSpec is defined in the Windows environment.
ifdef ComSpec
@@ -13,22 +14,14 @@ else
endif
CFLAGS =
ifdef ComSpec
CFLAGS += -I ../SDL2-2.0.7/i686-w64-mingw32/include
endif
CFLAGS += -std=gnu99
CFLAGS += -std=c89
CFLAGS += -Wshadow -Wall -Wpedantic -Wextra -Wdouble-promotion -Wunused-result
CFLAGS += -g
CFLAGS += -Ofast -march=native -pipe
CFLAGS += -O2 -march=native -pipe
CFLAGS += -flto
LDFLAGS =
ifdef ComSpec
LDFLAGS += -L..\SDL2-2.0.7\i686-w64-mingw32\lib
LDFLAGS += -lmingw32
LDFLAGS += -lSDL2main
endif
LDFLAGS += -lSDL2 -lm
LDFLAGS += -lm
ifdef ComSpec
RM = del /F /Q
@@ -40,15 +33,15 @@ endif
# Link.
$(BIN): $(SRCS:.c=.o)
@echo $(CC) *.o -o $(BIN)
@$(CC) $(CFLAGS) $(SRCS:.c=.o) $(LDFLAGS) -o $(BIN)
echo $(CC) *.o -o $(BIN)
$(CC) $(CFLAGS) $(SRCS:.c=.o) $(LDFLAGS) -o $(BIN)
# Compile.
%.o : %.c Makefile
@echo $(CC) -c $*.c
@$(CC) $(CFLAGS) -MMD -MP -MT $@ -MF $*.td -c $<
@$(RM) $*.d
@$(MV) $*.td $*.d
echo $(CC) -c $*.c
$(CC) $(CFLAGS) -MMD -MP -MT $@ -MF $*.td -c $<
$(RM) $*.d
$(MV) $*.td $*.d
%.d: ;
-include *.d
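A note on the dependency rules kept above: compiling with -MMD -MP writes the dependency list to a temporary *.td file which is only then moved over *.d, presumably so an interrupted compile cannot leave a truncated dependency file for make to re-read; the empty %.d rule and -include *.d let the first build succeed before any dependency files exist.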

main.c

@@ -1,358 +1,30 @@
// This program uses a modified version of the Genann Neural Network Library
// to learn handwritten digits.
//
// Get the training data from the machine learning database:
// wget http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data
#include "Tinn.h"
#include <errno.h>
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define toss(t, n) ((t*) malloc((n) * sizeof(t)))
#define retoss(ptr, t, n) (ptr = (t*) realloc((ptr), (n) * sizeof(t)))
typedef double (*genann_actfun)(double);
typedef struct
int main()
{
double** id;
double** od;
int icols;
int ocols;
int rows;
int split;
}
Data;
typedef struct
{
int inputs;
int hidden_layers;
int hidden;
int outputs;
genann_actfun activation_hidden;
genann_actfun activation_output;
int total_weights;
int total_neurons;
double* weight;
double* output;
double* delta;
}
Genann;
static double genann_act_sigmoid(const double a)
{
return a < -45.0 ? 0 : a > 45.0 ? 1.0 : 1.0 / (1 + exp(-a));
}
static void genann_randomize(Genann* const ann)
{
for(int i = 0; i < ann->total_weights; i++)
int i;
int inputs = 2;
int output = 2;
int hidden = 2;
double* I = (double*) calloc(inputs, sizeof(*I));
double* T = (double*) calloc(output, sizeof(*T));
Tinn tinn = tnew(inputs, output, hidden);
/* Input. */
I[0] = 0.05;
I[1] = 0.10;
/* Target. */
T[0] = 0.01;
T[1] = 0.99;
for(i = 0; i < 10000; i++)
{
double r = (double) rand() / RAND_MAX;
ann->weight[i] = r - 0.5;
double error = ttrain(tinn, I, T, 0.5);
printf("error: %0.13f\n", error);
}
}
// Clean this up. The mallocs do not look right.
static Genann *genann_init(const int inputs, const int hidden_layers, const int hidden, const int outputs)
{
const int hidden_weights = hidden_layers ? (inputs + 1) * hidden + (hidden_layers - 1) * (hidden + 1) * hidden : 0;
const int output_weights = (hidden_layers ? (hidden + 1) : (inputs + 1)) * outputs;
const int total_weights = hidden_weights + output_weights;
const int total_neurons = inputs + hidden * hidden_layers + outputs;
// Allocate extra size for weights, outputs, and deltas.
const int size = sizeof(Genann) + sizeof(double) * (total_weights + total_neurons + (total_neurons - inputs));
Genann* ret = (Genann*) malloc(size);
ret->inputs = inputs;
ret->hidden_layers = hidden_layers;
ret->hidden = hidden;
ret->outputs = outputs;
ret->total_weights = total_weights;
ret->total_neurons = total_neurons;
// Set pointers.
ret->weight = (double*) ((char*) ret + sizeof(Genann));
ret->output = ret->weight + ret->total_weights;
ret->delta = ret->output + ret->total_neurons;
ret->activation_hidden = genann_act_sigmoid;
ret->activation_output = genann_act_sigmoid;
genann_randomize(ret);
return ret;
}
static double const *genann_run(Genann const *ann, double const *inputs)
{
const double* w = ann->weight;
double* o = ann->output + ann->inputs;
const double* i = ann->output;
// Copy the inputs to the scratch area, where we also store each neuron's
// output, for consistency. This way the first layer isn't a special case.
memcpy(ann->output, inputs, sizeof(double) * ann->inputs);
const genann_actfun act = ann->activation_hidden;
const genann_actfun acto = ann->activation_output;
// Figure hidden layers, if any.
for(int h = 0; h < ann->hidden_layers; h++)
{
for(int j = 0; j < ann->hidden; j++)
{
double sum = *w++ * -1.0;
for(int k = 0; k < (h == 0 ? ann->inputs : ann->hidden); k++)
sum += *w++ * i[k];
*o++ = act(sum);
}
i += (h == 0 ? ann->inputs : ann->hidden);
}
const double* ret = o;
// Figure output layer.
for(int j = 0; j < ann->outputs; ++j)
{
double sum = *w++ * -1.0;
for(int k = 0; k < (ann->hidden_layers ? ann->hidden : ann->inputs); ++k)
sum += *w++ * i[k];
*o++ = acto(sum);
}
return ret;
}
static void genann_train(const Genann* ann, const double* inputs, const double* desired_outputs, const double rate)
{
// To begin with, we must run the network forward.
genann_run(ann, inputs);
// First set the output layer deltas.
{
// First output.
const double* o = ann->output + ann->inputs + ann->hidden * ann->hidden_layers;
// First delta.
double* d = ann->delta + ann->hidden * ann->hidden_layers;
// First desired output.
const double* t = desired_outputs;
// Set output layer deltas.
for(int j = 0; j < ann->outputs; j++, o++, t++)
*d++ = (*t - *o) * *o * (1.0 - *o);
}
// Set hidden layer deltas, start on last layer and work backwards.
// Note that loop is skipped in the case of hidden_layers == 0.
for(int h = ann->hidden_layers - 1; h >= 0; h--)
{
// Find first output and delta in this layer.
const double* o = ann->output + ann->inputs + (h * ann->hidden);
double* d = ann->delta + (h * ann->hidden);
// Find first delta in following layer (which may be hidden or output).
const double* const dd = ann->delta + ((h + 1) * ann->hidden);
// Find first weight in following layer (which may be hidden or output).
const double* const ww = ann->weight + ((ann->inputs + 1) * ann->hidden) + ((ann->hidden+1) * ann->hidden * (h));
for(int j = 0; j < ann->hidden; j++, d++, o++)
{
double delta = 0;
for(int k = 0; k < (h == ann->hidden_layers - 1 ? ann->outputs : ann->hidden); k++)
{
const double forward_delta = dd[k];
const int windex = k * (ann->hidden + 1) + (j + 1);
const double forward_weight = ww[windex];
delta += forward_delta * forward_weight;
}
*d = *o * (1.0 - *o) * delta;
}
}
// Train the outputs.
{
// Find first output delta.
const double* d = ann->delta + ann->hidden * ann->hidden_layers;
// Find first weight to first output delta.
double* w = ann->weight + (ann->hidden_layers ? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * ann->hidden * (ann->hidden_layers - 1)) : 0);
// Find first output in previous layer.
const double* const i = ann->output + (ann->hidden_layers ? (ann->inputs + ann->hidden * (ann->hidden_layers - 1)) : 0);
// Set output layer weights.
for(int j = 0; j < ann->outputs; ++j, ++d)
for(int k = 0; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; k++)
*w++ += (k == 0) ? (*d * rate * -1.0) : (*d * rate * i[k - 1]);
}
// Train the hidden layers.
for(int h = ann->hidden_layers - 1; h >= 0; h--)
{
// Find first delta in this layer.
const double* d = ann->delta + (h * ann->hidden);
// Find first input to this layer.
double* const i = ann->output + (h ? (ann->inputs + ann->hidden * (h - 1)) : 0);
// Find first weight to this layer.
double* w = ann->weight + (h ? ((ann->inputs + 1) * ann->hidden + (ann->hidden + 1) * (ann->hidden) * (h - 1)) : 0);
for(int j = 0; j < ann->hidden; j++, d++)
for(int k = 0; k < (h == 0 ? ann->inputs : ann->hidden) + 1; k++)
*w++ += (k == 0) ? (*d * rate * -1.0) : (*d * rate * i[k - 1]);
}
}
static int lns(FILE* const file)
{
int ch = EOF;
int lines = 0;
int pc = '\n';
while((ch = getc(file)) != EOF)
{
if(ch == '\n')
lines++;
pc = ch;
}
if(pc != '\n')
lines++;
rewind(file);
return lines;
}
static char* readln(FILE* const file)
{
int ch = EOF;
int reads = 0;
int size = 128;
char* line = toss(char, size);
while((ch = getc(file)) != '\n' && ch != EOF)
{
line[reads++] = ch;
if(reads + 1 == size)
retoss(line, char, size *= 2);
}
line[reads] = '\0';
return line;
}
static double** new2d(const int rows, const int cols)
{
double** row = toss(double*, rows);
for(int r = 0; r < rows; r++)
row[r] = toss(double, cols);
return row;
}
static Data ndata(const int icols, const int ocols, const int rows, const double percentage)
{
const Data data = {
new2d(rows, icols), new2d(rows, ocols), icols, ocols, rows, (int) (rows * percentage)
};
return data;
}
static void parse(const Data data, char* line, const int row)
{
const int cols = data.icols + data.ocols;
for(int col = 0; col < cols; col++)
{
const double val = atof(strtok(col == 0 ? line : NULL, " "));
if(col < data.icols)
data.id[row][col] = val;
else
data.od[row][col - data.icols] = val;
}
}
static void dfree(const Data d)
{
for(int row = 0; row < d.rows; row++)
{
free(d.id[row]);
free(d.od[row]);
}
free(d.id);
free(d.od);
}
static void shuffle(const Data d, const int upper)
{
for(int a = 0; a < upper; a++)
{
const int b = rand() % d.split;
double* ot = d.od[a];
double* it = d.id[a];
// Swap output.
d.od[a] = d.od[b];
d.od[b] = ot;
// Swap input.
d.id[a] = d.id[b];
d.id[b] = it;
}
}
static void print(const double* const arr, const int size, const double thresh)
{
for(int i = 0; i < size; i++)
printf("%d ", arr[i] > thresh);
}
static int cmp(const double* const a, const double* const b, const int size, const double thresh)
{
for(int i = 0; i < size; i++)
{
const int aa = a[i] > thresh;
const int bb = b[i] > thresh;
if(aa != bb)
return 0;
}
return 1;
}
static void predict(Genann* ann, const Data d)
{
const double thresh = 0.8;
int matches = 0;
for(int i = d.split; i < d.rows; i++)
{
// Prediction.
const double* const pred = genann_run(ann, d.id[i]);
const double* const real = d.od[i];
print(pred, d.ocols, thresh);
printf(":: ");
print(real, d.ocols, thresh);
const int match = cmp(pred, real, d.ocols, thresh);
printf("-> %d\n", match);
matches += match;
}
printf("%f\n", (double) matches / (d.rows - d.split));
}
static Data build(const char* path, const int icols, const int ocols, const double percentage)
{
FILE* file = fopen(path, "r");
const int rows = lns(file);
Data data = ndata(icols, ocols, rows, percentage);
for(int row = 0; row < rows; row++)
{
char* line = readln(file);
parse(data, line, row);
free(line);
}
fclose(file);
return data;
}
static Genann* train(const Data d, const int ntimes, const int layers, const int neurons, const double rate)
{
Genann* const ann = genann_init(d.icols, layers, neurons, d.ocols);
double annealed = rate;
for(int i = 0; i < ntimes; i++)
{
shuffle(d, d.split);
for(int j = 0; j < d.split; j++)
genann_train(ann, d.id[j], d.od[j], annealed);
printf("%f: %f\n", (double) i / ntimes, annealed);
annealed *= 0.95;
}
return ann;
}
int main(int argc, char* argv[])
{
(void) argc;
(void) argv;
srand(time(0));
const Data data = build("semeion.data", 256, 10, 0.9);
shuffle(data, data.rows);
Genann* ann = train(data, 128, 1, data.icols / 2.0, 3.0); // Hyperparams.
predict(ann, data);
free(ann);
dfree(data);
tfree(tinn);
free(I);
free(T);
return 0;
}

test.c

@ -1,94 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
static double act(const double in)
{
return 1.0 / (1.0 + exp(-in));
}
static double shid(const double W[], const double I[], const int neuron, const int inputs)
{
double sum = 0.0;
int i;
for(i = 0; i < inputs; i++)
sum += I[i] * W[i + neuron * inputs];
return sum;
}
static double sout(const double W[], const double I[], const int neuron, const int inputs, const int hidden)
{
double sum = 0.0;
int i;
for(i = 0; i < inputs; i++)
sum += I[i] * W[i + hidden * (neuron + inputs)];
return sum;
}
static double cerr(const double T[], const double O[], const int count)
{
double ssqr = 0.0;
int i;
for(i = 0; i < count; i++)
{
const double sub = T[i] - O[i];
ssqr += sub * sub;
}
return 0.5 * ssqr;
}
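/* Worked first iteration with the initial weights in train() below:
 * H[0] = act(0.35 + 0.15 * 0.05 + 0.20 * 0.10) = act(0.3775) ~ 0.5933 and
 * H[1] = act(0.3925) ~ 0.5969; then O[0] = act(0.60 + 0.40 * H[0] + 0.45 * H[1])
 * ~ 0.7514 and O[1] ~ 0.7729, so cerr(T, O, 2) ~ 0.2984 before any update. */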
static void bprop(double W[], const double I[], const double H[], const double O[], const double T[], const double rate)
{
const double a = -(T[0] - O[0]) * O[0] * (1.0 - O[0]);
const double b = -(T[1] - O[1]) * O[1] * (1.0 - O[1]);
const double c = (W[4] * a + W[6] * b) * (1.0 - H[0]);
const double d = (W[5] * a + W[7] * b) * (1.0 - H[1]);
/* Hidden layer */
W[0] -= rate * H[0] * c * I[0];
W[1] -= rate * H[0] * c * I[1];
W[2] -= rate * H[1] * d * I[0];
W[3] -= rate * H[1] * d * I[1];
/* Output layer */
W[4] -= rate * H[0] * a;
W[5] -= rate * H[1] * a;
W[6] -= rate * H[0] * b;
W[7] -= rate * H[1] * b;
}
/* Feed-forward neural network with a single hidden layer and backpropagation error correction. */
static double train(const double I[], const double T[], const int nips, const int nops, const double rate, const int iters)
{
const double B[] = { 0.35, 0.60 };
const int nhid = sizeof(B) / sizeof(*B);
double W[] = { 0.15, 0.20, 0.25, 0.30, 0.40, 0.45, 0.50, 0.55 };
double* H = (double*) malloc(sizeof(*H) * nhid);
double* O = (double*) malloc(sizeof(*O) * nops);
double error;
int iter;
for(iter = 0; iter < iters; iter++)
{
int i;
for(i = 0; i < nhid; i++) H[i] = act(B[0] + shid(W, I, i, nips));
for(i = 0; i < nops; i++) O[i] = act(B[1] + sout(W, H, i, nips, nhid));
bprop(W, I, H, O, T, rate);
}
error = cerr(T, O, nops);
free(H);
free(O);
return error;
}
int main()
{
const double rate = 0.5;
const double I[] = { 0.05, 0.10 };
const double T[] = { 0.01, 0.99 };
const double error = train(I, T, sizeof(I) / sizeof(*I), sizeof(T) / sizeof(*T), rate, 10000);
printf("%f\n", error);
return 0;
}

test2.c

@ -1,30 +0,0 @@
#include "Tinn.h"
#include <stdio.h>
#include <stdlib.h>
int main()
{
int i;
int inputs = 2;
int output = 2;
int hidden = 2;
double* I = (double*) calloc(inputs, sizeof(*I));
double* T = (double*) calloc(output, sizeof(*T));
Tinn tinn = tnew(inputs, output, hidden);
/* Input. */
I[0] = 0.05;
I[1] = 0.10;
/* Target. */
T[0] = 0.01;
T[1] = 0.99;
for(i = 0; i < 10000; i++)
{
double error = ttrain(tinn, I, T, 0.5);
printf("error: %0.13f\n", error);
}
tfree(tinn);
free(I);
free(T);
return 0;
}