From 0228fdb32e89270fece8c1e186d1f891a4452f92 Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Sat, 31 Mar 2018 04:29:03 -0700 Subject: [PATCH 01/12] double to float --- .gitignore | 1 + Tinn.c | 60 +++++++++++++++++++++++++++--------------------------- Tinn.h | 14 ++++++------- test.c | 32 ++++++++++++++--------------- 4 files changed, 54 insertions(+), 53 deletions(-) diff --git a/.gitignore b/.gitignore index 6e56bdf..64242fc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *.dat* +*.txt *.o *.d tinn diff --git a/Tinn.c b/Tinn.c index d4b39a1..e483c9c 100644 --- a/Tinn.c +++ b/Tinn.c @@ -6,55 +6,55 @@ #include // Error function. -static double err(double a, double b) +static float err(float a, float b) { - return 0.5 * pow(a - b, 2.0); + return 0.5f * powf(a - b, 2.0f); } // Partial derivative of error function. -static double pderr(double a, double b) +static float pderr(float a, float b) { return a - b; } // Total error. -static double terr(const double* tg, const double* o, int size) +static float terr(const float* tg, const float* o, int size) { - double sum = 0.0; + float sum = 0.0f; for(int i = 0; i < size; i++) sum += err(tg[i], o[i]); return sum; } // Activation function. -static double act(double a) +static float act(float a) { - return 1.0 / (1.0 + exp(-a)); + return 1.0f / (1.0f + expf(-a)); } // Partial derivative of activation function. -static double pdact(double a) +static float pdact(float a) { - return a * (1.0 - a); + return a * (1.0f - a); } // Floating point random from 0.0 - 1.0. -static double frand() +static float frand() { - return rand() / (double) RAND_MAX; + return rand() / (float) RAND_MAX; } // Back propagation. -static void backwards(const Tinn t, const double* in, const double* tg, double rate) +static void backwards(const Tinn t, const float* in, const float* tg, float rate) { for(int i = 0; i < t.nhid; i++) { - double sum = 0.0; + float sum = 0.0f; // Calculate total error change with respect to output. 
for(int j = 0; j < t.nops; j++) { - double a = pderr(t.o[j], tg[j]); - double b = pdact(t.o[j]); + float a = pderr(t.o[j], tg[j]); + float b = pdact(t.o[j]); sum += a * b * t.x[j * t.nhid + i]; // Correct weights in hidden to output layer. t.x[j * t.nhid + i] -= rate * a * b * t.h[i]; @@ -66,12 +66,12 @@ static void backwards(const Tinn t, const double* in, const double* tg, double r } // Forward propagation. -static void forewards(const Tinn t, const double* in) +static void forewards(const Tinn t, const float* in) { // Calculate hidden layer neuron values. for(int i = 0; i < t.nhid; i++) { - double sum = 0.0; + float sum = 0.0f; for(int j = 0; j < t.nips; j++) sum += in[j] * t.w[i * t.nips + j]; t.h[i] = act(sum + t.b[0]); @@ -79,7 +79,7 @@ static void forewards(const Tinn t, const double* in) // Calculate output layer neuron values. for(int i = 0; i < t.nops; i++) { - double sum = 0.0; + float sum = 0.0f; for(int j = 0; j < t.nhid; j++) sum += t.h[j] * t.x[i * t.nhid + j]; t.o[i] = act(sum + t.b[1]); @@ -89,17 +89,17 @@ static void forewards(const Tinn t, const double* in) // Randomizes weights and biases. static void twrand(const Tinn t) { - for(int i = 0; i < t.nw; i++) t.w[i] = frand() - 0.5; - for(int i = 0; i < t.nb; i++) t.b[i] = frand() - 0.5; + for(int i = 0; i < t.nw; i++) t.w[i] = frand() - 0.5f; + for(int i = 0; i < t.nb; i++) t.b[i] = frand() - 0.5f; } -double* xpredict(const Tinn t, const double* in) +float* xpredict(const Tinn t, const float* in) { forewards(t, in); return t.o; } -double xttrain(const Tinn t, const double* in, const double* tg, double rate) +float xttrain(const Tinn t, const float* in, const float* tg, float rate) { forewards(t, in); backwards(t, in, tg, rate); @@ -112,11 +112,11 @@ Tinn xtbuild(int nips, int nhid, int nops) // Tinn only supports one hidden layer so there are two biases. 
t.nb = 2; t.nw = nhid * (nips + nops); - t.w = (double*) calloc(t.nw, sizeof(*t.w)); + t.w = (float*) calloc(t.nw, sizeof(*t.w)); t.x = t.w + nhid * nips; - t.b = (double*) calloc(t.nb, sizeof(*t.b)); - t.h = (double*) calloc(nhid, sizeof(*t.h)); - t.o = (double*) calloc(nops, sizeof(*t.o)); + t.b = (float*) calloc(t.nb, sizeof(*t.b)); + t.h = (float*) calloc(nhid, sizeof(*t.h)); + t.o = (float*) calloc(nops, sizeof(*t.o)); t.nips = nips; t.nhid = nhid; t.nops = nops; @@ -131,8 +131,8 @@ void xtsave(const Tinn t, const char* path) // Header. fprintf(file, "%d %d %d\n", t.nips, t.nhid, t.nops); // Biases and weights. - for(int i = 0; i < t.nb; i++) fprintf(file, "%lf\n", t.b[i]); - for(int i = 0; i < t.nw; i++) fprintf(file, "%lf\n", t.w[i]); + for(int i = 0; i < t.nb; i++) fprintf(file, "%f\n", (double) t.b[i]); + for(int i = 0; i < t.nw; i++) fprintf(file, "%f\n", (double) t.w[i]); fclose(file); } @@ -147,8 +147,8 @@ Tinn xtload(const char* path) // A new tinn is returned. Tinn t = xtbuild(nips, nhid, nips); // Biases and weights. - for(int i = 0; i < t.nb; i++) fscanf(file, "%lf\n", &t.b[i]); - for(int i = 0; i < t.nw; i++) fscanf(file, "%lf\n", &t.w[i]); + for(int i = 0; i < t.nb; i++) fscanf(file, "%f\n", &t.b[i]); + for(int i = 0; i < t.nw; i++) fscanf(file, "%f\n", &t.w[i]); fclose(file); return t; } diff --git a/Tinn.h b/Tinn.h index 6c26141..250edc5 100644 --- a/Tinn.h +++ b/Tinn.h @@ -2,11 +2,11 @@ typedef struct { - double* w; // All the weights. - double* x; // Hidden to output layer weights. - double* b; // Biases. - double* h; // Hidden layer. - double* o; // Output layer. + float* w; // All the weights. + float* x; // Hidden to output layer weights. + float* b; // Biases. + float* h; // Hidden layer. + float* o; // Output layer. // Number of biases - always two - Tinn only supports a single hidden layer. int nb; @@ -22,7 +22,7 @@ Tinn; // Trains a tinn with an input and target output with a learning rate. // Returns error rate of the neural network. 
-double xttrain(const Tinn, const double* in, const double* tg, double rate); +float xttrain(const Tinn, const float* in, const float* tg, float rate); // Builds a new tinn object given number of inputs (nips), // number of hidden neurons for the hidden layer (nhid), @@ -30,7 +30,7 @@ double xttrain(const Tinn, const double* in, const double* tg, double rate); Tinn xtbuild(int nips, int nhid, int nops); // Returns an output prediction given an input. -double* xpredict(const Tinn, const double* in); +float* xpredict(const Tinn, const float* in); // Saves the tinn to disk. void xtsave(const Tinn, const char* path); diff --git a/test.c b/test.c index 987f406..f263077 100644 --- a/test.c +++ b/test.c @@ -5,8 +5,8 @@ typedef struct { - double** in; - double** tg; + float** in; + float** tg; int nips; int nops; int rows; @@ -46,11 +46,11 @@ static char* readln(FILE* const file) return line; } -static double** new2d(const int rows, const int cols) +static float** new2d(const int rows, const int cols) { - double** row = (double**) malloc((rows) * sizeof(double*)); + float** row = (float**) malloc((rows) * sizeof(float*)); for(int r = 0; r < rows; r++) - row[r] = (double*) malloc((cols) * sizeof(double)); + row[r] = (float*) malloc((cols) * sizeof(float)); return row; } @@ -67,7 +67,7 @@ static void parse(const Data data, char* line, const int row) const int cols = data.nips + data.nops; for(int col = 0; col < cols; col++) { - const double val = atof(strtok(col == 0 ? line : NULL, " ")); + const float val = atof(strtok(col == 0 ? line : NULL, " ")); if(col < data.nips) data.in[row][col] = val; else @@ -91,8 +91,8 @@ static void shuffle(const Data d) for(int a = 0; a < d.rows; a++) { const int b = rand() % d.rows; - double* ot = d.tg[a]; - double* it = d.in[a]; + float* ot = d.tg[a]; + float* it = d.in[a]; // Swap output. d.tg[a] = d.tg[b]; d.tg[b] = ot; @@ -135,8 +135,8 @@ int main() // It can be fine tuned along with the number of hidden layers. 
// Feel free to modify the anneal rate as well. const int nhid = 30; - double rate = 1.0; - const double anneal = 0.99; + float rate = 1.0; + const float anneal = 0.99; // Load the training set. const Data data = build("semeion.data", nips, nops); // Train, baby, train. @@ -144,11 +144,11 @@ int main() for(int i = 0; i < 100; i++) { shuffle(data); - double error = 0.0; + float error = 0.0; for(int j = 0; j < data.rows; j++) { - const double* const in = data.in[j]; - const double* const tg = data.tg[j]; + const float* const in = data.in[j]; + const float* const tg = data.tg[j]; error += xttrain(tinn, in, tg, rate); } printf("error %.12f :: rate %f\n", error / data.rows, rate); @@ -162,9 +162,9 @@ int main() // Now we do a prediction with the neural network we loaded from disk. // Ideally, we would also load a testing set to make the prediction with, // but for the sake of brevity here we just reuse the training set from earlier. - const double* const in = data.in[0]; - const double* const tg = data.tg[0]; - const double* const pd = xpredict(loaded, in); + const float* const in = data.in[0]; + const float* const tg = data.tg[0]; + const float* const pd = xpredict(loaded, in); for(int i = 0; i < data.nops; i++) { printf("%f ", tg[i]); } printf("\n"); for(int i = 0; i < data.nops; i++) { printf("%f ", pd[i]); } printf("\n"); // All done. Let's clean up. 
From ae2c658354ceae1fa5fbc9f5fe599be8de80af48 Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Sat, 31 Mar 2018 04:29:56 -0700 Subject: [PATCH 02/12] double to float --- test.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test.c b/test.c index f263077..1a675cf 100644 --- a/test.c +++ b/test.c @@ -151,7 +151,7 @@ int main() const float* const tg = data.tg[j]; error += xttrain(tinn, in, tg, rate); } - printf("error %.12f :: rate %f\n", error / data.rows, rate); + printf("error %.12f :: rate %f\n", (double) error / data.rows, (double) rate); rate *= anneal; } // This is how you save the neural network to disk. @@ -165,8 +165,8 @@ int main() const float* const in = data.in[0]; const float* const tg = data.tg[0]; const float* const pd = xpredict(loaded, in); - for(int i = 0; i < data.nops; i++) { printf("%f ", tg[i]); } printf("\n"); - for(int i = 0; i < data.nops; i++) { printf("%f ", pd[i]); } printf("\n"); + for(int i = 0; i < data.nops; i++) { printf("%f ", (double) tg[i]); } printf("\n"); + for(int i = 0; i < data.nops; i++) { printf("%f ", (double) pd[i]); } printf("\n"); // All done. Let's clean up. xtfree(loaded); dfree(data); From c74ab3ab22b77648492aa254a8002c45c3f3369c Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Sat, 31 Mar 2018 04:33:12 -0700 Subject: [PATCH 03/12] fewer hidden neurons --- test.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test.c b/test.c index 1a675cf..0133777 100644 --- a/test.c +++ b/test.c @@ -134,9 +134,9 @@ int main() // Learning rate is annealed and thus not constant. // It can be fine tuned along with the number of hidden layers. // Feel free to modify the anneal rate as well. - const int nhid = 30; - float rate = 1.0; - const float anneal = 0.99; + const int nhid = 8; + float rate = 0.5f; + const float anneal = 0.99f; // Load the training set. const Data data = build("semeion.data", nips, nops); // Train, baby, train.
@@ -144,7 +144,7 @@ int main() { shuffle(data); - float error = 0.0; + float error = 0.0f; for(int j = 0; j < data.rows; j++) { const float* const in = data.in[j]; From bcf16644d2e3345dd3c378cc53d30a73984decdc Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Sun, 1 Apr 2018 00:59:20 -0700 Subject: [PATCH 04/12] more hidden neurons --- test.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test.c b/test.c index 0133777..7e31c3e 100644 --- a/test.c +++ b/test.c @@ -134,8 +134,8 @@ int main() // Learning rate is annealed and thus not constant. // It can be fine tuned along with the number of hidden layers. // Feel free to modify the anneal rate as well. - const int nhid = 8; - float rate = 0.5f; + const int nhid = 28; + float rate = 1.0f; const float anneal = 0.99f; // Load the training set. const Data data = build("semeion.data", nips, nops); From 08ada030cbb55310111b8d2e87d009c21cee1441 Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Sun, 1 Apr 2018 15:25:47 -0700 Subject: [PATCH 05/12] tinn no longer seeds rng --- Tinn.c | 2 -- test.c | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Tinn.c b/Tinn.c index e483c9c..412980b 100644 --- a/Tinn.c +++ b/Tinn.c @@ -3,7 +3,6 @@ #include #include #include -#include // Error function. static float err(float a, float b) @@ -120,7 +119,6 @@ Tinn xtbuild(int nips, int nhid, int nops) t.nips = nips; t.nhid = nhid; t.nops = nops; - srand(time(0)); twrand(t); return t; } diff --git a/test.c b/test.c index 7e31c3e..7362aa8 100644 --- a/test.c +++ b/test.c @@ -1,5 +1,6 @@ #include "Tinn.h" #include +#include #include #include @@ -126,6 +127,8 @@ static Data build(const char* path, const int nips, const int nops) return data; } int main() { + // Tinn does not seed the random number generator. + srand(time(0)); // Input and output size is harded coded here as machine learning // repositories usually don't include the input and output size in the data itself.
const int nips = 256; From ea130335dabe2e21bb835aa12813c8fe870565f2 Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Sun, 1 Apr 2018 17:01:42 -0700 Subject: [PATCH 06/12] const correctness --- Tinn.c | 10 +++++----- Tinn.h | 7 ++----- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/Tinn.c b/Tinn.c index 412980b..af83896 100644 --- a/Tinn.c +++ b/Tinn.c @@ -52,8 +52,8 @@ static void backwards(const Tinn t, const float* in, const float* tg, float rate // Calculate total error change with respect to output. for(int j = 0; j < t.nops; j++) { - float a = pderr(t.o[j], tg[j]); - float b = pdact(t.o[j]); + const float a = pderr(t.o[j], tg[j]); + const float b = pdact(t.o[j]); sum += a * b * t.x[j * t.nhid + i]; // Correct weights in hidden to output layer. t.x[j * t.nhid + i] -= rate * a * b * t.h[i]; @@ -125,7 +125,7 @@ Tinn xtbuild(int nips, int nhid, int nops) void xtsave(const Tinn t, const char* path) { - FILE* file = fopen(path, "w"); + FILE* const file = fopen(path, "w"); // Header. fprintf(file, "%d %d %d\n", t.nips, t.nhid, t.nops); // Biases and weights. @@ -136,14 +136,14 @@ void xtsave(const Tinn t, const char* path) Tinn xtload(const char* path) { - FILE* file = fopen(path, "r"); + FILE* const file = fopen(path, "r"); int nips = 0; int nhid = 0; int nops = 0; // Header. fscanf(file, "%d %d %d\n", &nips, &nhid, &nops); // A new tinn is returned. - Tinn t = xtbuild(nips, nhid, nips); + const Tinn t = xtbuild(nips, nhid, nops); // Biases and weights. for(int i = 0; i < t.nb; i++) fscanf(file, "%f\n", &t.b[i]); for(int i = 0; i < t.nw; i++) fscanf(file, "%f\n", &t.w[i]); fclose(file); return t; } diff --git a/Tinn.h b/Tinn.h index 250edc5..610fac1 100644 --- a/Tinn.h +++ b/Tinn.h @@ -8,11 +8,8 @@ typedef struct float* h; // Hidden layer. float* o; // Output layer. - // Number of biases - always two - Tinn only supports a single hidden layer. - int nb; - - // Number of weights. - int nw; + int nb; // Number of biases - always two - Tinn only supports a single hidden layer.
+ int nw; // Number of weights. int nips; // Number of inputs. int nhid; // Number of hidden neurons. From dbd3d71de5ddab3f53684bfd0118e44ef0d73bdd Mon Sep 17 00:00:00 2001 From: Sascha Grunert Date: Mon, 2 Apr 2018 15:31:27 +0200 Subject: [PATCH 07/12] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 99bfad3..92c0a3f 100644 --- a/README.md +++ b/README.md @@ -10,13 +10,13 @@ Tinn can be compiled with any C++ compiler as well. int main() { - double in[] = { 0.05, 0.10 }; - double tg[] = { 0.01, 0.99 }; + float in[] = { 0.05, 0.10 }; + float tg[] = { 0.01, 0.99 }; /* Two hidden neurons */ const Tinn tinn = xtbuild(len(in), 2, len(tg)); for(int i = 0; i < 1000; i++) { - double error = xttrain(tinn, in, tg, 0.5); + float error = xttrain(tinn, in, tg, 0.5); printf("%.12f\n", error); } xtfree(tinn); From be0f1f3247e235948a5821d3a2ccbe5477cb03c8 Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Mon, 2 Apr 2018 12:42:19 -0700 Subject: [PATCH 08/12] interactive --- Makefile | 1 + test.c | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 104 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 40382d5..a16be38 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,7 @@ CFLAGS += -flto LDFLAGS = LDFLAGS += -lm +LDFLAGS += -lSDL2 ifdef ComSpec RM = del /F /Q diff --git a/test.c b/test.c index 7362aa8..938f3e5 100644 --- a/test.c +++ b/test.c @@ -1,8 +1,19 @@ #include "Tinn.h" #include +#include #include +#include #include #include +#include + +typedef struct +{ + bool down; + int x; + int y; +} +Input; typedef struct { @@ -125,6 +136,93 @@ static Data build(const char* path, const int nips, const int nops) return data; } +void dprint(const float* const p, const int size) +{ + for(int i = 0; i < size; i++) + printf("%f ", (double) p[i]); + printf("\n"); +} + +typedef struct +{ + int i; + float val; +} +Index; + +Index ixmax(const float* const p, const int 
size) +{ + Index ix; + ix.val = -FLT_MAX; + for(int i = 0; i < size; i++) + if(p[i] > ix.val) + ix.val = p[ix.i = i]; + return ix; +} + +void dploop(const Tinn tinn, const Data data) +{ + SDL_Renderer* renderer; + SDL_Window* window; + #define W 16 + #define H 16 + #define S 20 + const int xres = W * S; + const int yres = H * S; + SDL_CreateWindowAndRenderer(xres, yres, 0, &window, &renderer); + static float digit[W * H]; + Input input = { false, 0, 0 }; + for(SDL_Event e; true; SDL_PollEvent(&e)) + { + if(e.type == SDL_QUIT) + exit(1); + const int button = SDL_GetMouseState(&input.x, &input.y); + // Draw digit. + if(button) + { + const int xx = input.x / S; + const int yy = input.y / S; + const int w = 2; + for(int i = 0; i < w; i++) + for(int j = 0; j < w; j++) + digit[(xx + i) + W * (yy + j)] = 1.0f; + input.down = true; + } + // Predict. + else + { + if(input.down) + { + const float* const pred = xpredict(tinn, digit); + dprint(pred, data.nops); + const Index ix = ixmax(pred, data.nops); + if(ix.val > 0.9f) + printf("%d\n", ix.i); + else + printf("I do not recognize that digit\n"); + memset((void*) digit, 0, sizeof(digit)); + } + input.down = false; + } + // Draw digit to screen. + for(int x = 0; x < xres; x++) + for(int y = 0; y < yres; y++) + { + const int xx = x / S; + const int yy = y / S; + digit[xx + W * yy] == 1.0f ? + SDL_SetRenderDrawColor(renderer, 0xFF, 0xFF, 0xFF, 0xFF): + SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0xFF); + SDL_RenderDrawPoint(renderer, x, y); + } + SDL_RenderPresent(renderer); + SDL_Delay(15); + } + #undef W + #undef H + #undef S +} + int main() { // Tinn does not seed the random number generator. @@ -144,7 +242,7 @@ int main() const Data data = build("semeion.data", nips, nops); // Train, baby, train. 
const Tinn tinn = xtbuild(nips, nhid, nops); - for(int i = 0; i < 100; i++) + for(int i = 0; i < 200; i++) { shuffle(data); float error = 0.0f; @@ -163,14 +261,10 @@ int main() // This is how you load the neural network from disk. const Tinn loaded = xtload("saved.tinn"); // Now we do a prediction with the neural network we loaded from disk. - // Ideally, we would also load a testing set to make the prediction with, - // but for the sake of brevity here we just reuse the training set from earlier. - const float* const in = data.in[0]; - const float* const tg = data.tg[0]; - const float* const pd = xpredict(loaded, in); - for(int i = 0; i < data.nops; i++) { printf("%f ", (double) tg[i]); } printf("\n"); - for(int i = 0; i < data.nops; i++) { printf("%f ", (double) pd[i]); } printf("\n"); - // All done. Let's clean up. + // SDL will create a window so that you can draw digits. + // Enter the draw and predict loop: + dploop(loaded, data); + // All done. Let's clean up xtfree(loaded); dfree(data); return 0; From 5614720d8854e70adbc014078dc5f6cbf0e88f2d Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Mon, 2 Apr 2018 15:33:34 -0700 Subject: [PATCH 09/12] Revert "interactive" This reverts commit be0f1f3247e235948a5821d3a2ccbe5477cb03c8. 
--- Makefile | 1 - test.c | 112 +++++-------------------------------------------------- 2 files changed, 9 insertions(+), 104 deletions(-) diff --git a/Makefile b/Makefile index a16be38..40382d5 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,6 @@ CFLAGS += -flto LDFLAGS = LDFLAGS += -lm -LDFLAGS += -lSDL2 ifdef ComSpec RM = del /F /Q diff --git a/test.c b/test.c index 938f3e5..7362aa8 100644 --- a/test.c +++ b/test.c @@ -1,19 +1,8 @@ #include "Tinn.h" #include -#include #include -#include #include #include -#include - -typedef struct -{ - bool down; - int x; - int y; -} -Input; typedef struct { @@ -136,93 +125,6 @@ static Data build(const char* path, const int nips, const int nops) return data; } -void dprint(const float* const p, const int size) -{ - for(int i = 0; i < size; i++) - printf("%f ", (double) p[i]); - printf("\n"); -} - -typedef struct -{ - int i; - float val; -} -Index; - -Index ixmax(const float* const p, const int size) -{ - Index ix; - ix.val = -FLT_MAX; - for(int i = 0; i < size; i++) - if(p[i] > ix.val) - ix.val = p[ix.i = i]; - return ix; -} - -void dploop(const Tinn tinn, const Data data) -{ - SDL_Renderer* renderer; - SDL_Window* window; - #define W 16 - #define H 16 - #define S 20 - const int xres = W * S; - const int yres = H * S; - SDL_CreateWindowAndRenderer(xres, yres, 0, &window, &renderer); - static float digit[W * H]; - Input input = { false, 0, 0 }; - for(SDL_Event e; true; SDL_PollEvent(&e)) - { - if(e.type == SDL_QUIT) - exit(1); - const int button = SDL_GetMouseState(&input.x, &input.y); - // Draw digit. - if(button) - { - const int xx = input.x / S; - const int yy = input.y / S; - const int w = 2; - for(int i = 0; i < w; i++) - for(int j = 0; j < w; j++) - digit[(xx + i) + W * (yy + j)] = 1.0f; - input.down = true; - } - // Predict. 
- else - { - if(input.down) - { - const float* const pred = xpredict(tinn, digit); - dprint(pred, data.nops); - const Index ix = ixmax(pred, data.nops); - if(ix.val > 0.9f) - printf("%d\n", ix.i); - else - printf("I do not recognize that digit\n"); - memset((void*) digit, 0, sizeof(digit)); - } - input.down = false; - } - // Draw digit to screen. - for(int x = 0; x < xres; x++) - for(int y = 0; y < yres; y++) - { - const int xx = x / S; - const int yy = y / S; - digit[xx + W * yy] == 1.0f ? - SDL_SetRenderDrawColor(renderer, 0xFF, 0xFF, 0xFF, 0xFF): - SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0xFF); - SDL_RenderDrawPoint(renderer, x, y); - } - SDL_RenderPresent(renderer); - SDL_Delay(15); - } - #undef W - #undef H - #undef S -} - int main() { // Tinn does not seed the random number generator. @@ -242,7 +144,7 @@ int main() const Data data = build("semeion.data", nips, nops); // Train, baby, train. const Tinn tinn = xtbuild(nips, nhid, nops); - for(int i = 0; i < 200; i++) + for(int i = 0; i < 100; i++) { shuffle(data); float error = 0.0f; @@ -261,10 +163,14 @@ int main() // This is how you load the neural network from disk. const Tinn loaded = xtload("saved.tinn"); // Now we do a prediction with the neural network we loaded from disk. - // SDL will create a window so that you can draw digits. - // Enter the draw and predict loop: - dploop(loaded, data); - // All done. Let's clean up + // Ideally, we would also load a testing set to make the prediction with, + // but for the sake of brevity here we just reuse the training set from earlier. + const float* const in = data.in[0]; + const float* const tg = data.tg[0]; + const float* const pd = xpredict(loaded, in); + for(int i = 0; i < data.nops; i++) { printf("%f ", (double) tg[i]); } printf("\n"); + for(int i = 0; i < data.nops; i++) { printf("%f ", (double) pd[i]); } printf("\n"); + // All done. Let's clean up. 
xtfree(loaded); dfree(data); return 0; From 268ba8394a884c5e0db647914cefe0a06eeeec23 Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Mon, 2 Apr 2018 15:37:26 -0700 Subject: [PATCH 10/12] calloc and fopen asserts --- Tinn.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/Tinn.c b/Tinn.c index af83896..e76c76a 100644 --- a/Tinn.c +++ b/Tinn.c @@ -1,6 +1,7 @@ #include "Tinn.h" #include +#include #include #include @@ -111,11 +112,11 @@ Tinn xtbuild(int nips, int nhid, int nops) // Tinn only supports one hidden layer so there are two biases. t.nb = 2; t.nw = nhid * (nips + nops); - t.w = (float*) calloc(t.nw, sizeof(*t.w)); + assert(t.w = (float*) calloc(t.nw, sizeof(*t.w))); t.x = t.w + nhid * nips; - t.b = (float*) calloc(t.nb, sizeof(*t.b)); - t.h = (float*) calloc(nhid, sizeof(*t.h)); - t.o = (float*) calloc(nops, sizeof(*t.o)); + assert(t.b = (float*) calloc(t.nb, sizeof(*t.b))); + assert(t.h = (float*) calloc(nhid, sizeof(*t.h))); + assert(t.o = (float*) calloc(nops, sizeof(*t.o))); t.nips = nips; t.nhid = nhid; t.nops = nops; @@ -125,7 +126,8 @@ Tinn xtbuild(int nips, int nhid, int nops) void xtsave(const Tinn t, const char* path) { - FILE* const file = fopen(path, "w"); + FILE* file; + assert(file = fopen(path, "w")); // Header. fprintf(file, "%d %d %d\n", t.nips, t.nhid, t.nops); // Biases and weights. 
@@ -136,7 +138,8 @@ void xtsave(const Tinn t, const char* path) Tinn xtload(const char* path) { - FILE* const file = fopen(path, "r"); + FILE* file; + assert(file = fopen(path, "r")); int nips = 0; int nhid = 0; int nops = 0; From 72dd9ed70a5bcb697b1261037592c391a71fa82c Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Mon, 2 Apr 2018 15:54:33 -0700 Subject: [PATCH 11/12] removed asserts and added ecalloc and efopen instead --- Tinn.c | 44 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/Tinn.c b/Tinn.c index e76c76a..e1b346b 100644 --- a/Tinn.c +++ b/Tinn.c @@ -1,7 +1,7 @@ #include "Tinn.h" +#include #include -#include #include #include @@ -93,6 +93,34 @@ static void twrand(const Tinn t) for(int i = 0; i < t.nb; i++) t.b[i] = frand() - 0.5f; } +// Prints a message and exits. +static void bomb(const char* const message, ...) +{ + va_list args; + va_start(args, message); + vprintf(message, args); + va_end(args); + exit(1); +} + +// Fail safe file opening. +static FILE* efopen(const char* const pathname, const char* const mode) +{ + FILE* const file = fopen(pathname, mode); + if(file == NULL) + bomb("failure: fopen(\"%s\", \"%s\")\n", pathname, mode); + return file; +} + +// Fail safe clear allocation. +static void* ecalloc(const size_t nmemb, const size_t size) +{ + void* const mem = calloc(nmemb, size); + if(mem == NULL) + bomb("failure: calloc(%d, %d)\n", nmemb, size); + return mem; +} + float* xpredict(const Tinn t, const float* in) { forewards(t, in); @@ -112,11 +140,11 @@ Tinn xtbuild(int nips, int nhid, int nops) // Tinn only supports one hidden layer so there are two biases. 
t.nb = 2; t.nw = nhid * (nips + nops); - assert(t.w = (float*) calloc(t.nw, sizeof(*t.w))); + t.w = (float*) ecalloc(t.nw, sizeof(*t.w)); t.x = t.w + nhid * nips; - assert(t.b = (float*) calloc(t.nb, sizeof(*t.b))); - assert(t.h = (float*) calloc(nhid, sizeof(*t.h))); - assert(t.o = (float*) calloc(nops, sizeof(*t.o))); + t.b = (float*) ecalloc(t.nb, sizeof(*t.b)); + t.h = (float*) ecalloc(nhid, sizeof(*t.h)); + t.o = (float*) ecalloc(nops, sizeof(*t.o)); t.nips = nips; t.nhid = nhid; t.nops = nops; @@ -126,8 +154,7 @@ Tinn xtbuild(int nips, int nhid, int nops) void xtsave(const Tinn t, const char* path) { - FILE* file; - assert(file = fopen(path, "w")); + FILE* file = efopen(path, "w"); // Header. fprintf(file, "%d %d %d\n", t.nips, t.nhid, t.nops); // Biases and weights. @@ -138,8 +165,7 @@ void xtsave(const Tinn t, const char* path) Tinn xtload(const char* path) { - FILE* file; - assert(file = fopen(path, "r")); + FILE* file = efopen(path, "r"); int nips = 0; int nhid = 0; int nops = 0; From 2617d72ec523d4ffd89f0e6e6d6d80056326db8b Mon Sep 17 00:00:00 2001 From: Gustav Louw Date: Mon, 2 Apr 2018 15:58:25 -0700 Subject: [PATCH 12/12] more const --- Tinn.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Tinn.c b/Tinn.c index e1b346b..e5bf2da 100644 --- a/Tinn.c +++ b/Tinn.c @@ -154,7 +154,7 @@ Tinn xtbuild(int nips, int nhid, int nops) void xtsave(const Tinn t, const char* path) { - FILE* file = efopen(path, "w"); + FILE* const file = efopen(path, "w"); // Header. fprintf(file, "%d %d %d\n", t.nips, t.nhid, t.nops); // Biases and weights. @@ -165,7 +165,7 @@ void xtsave(const Tinn t, const char* path) Tinn xtload(const char* path) { - FILE* file = efopen(path, "r"); + FILE* const file = efopen(path, "r"); int nips = 0; int nhid = 0; int nops = 0;