mirror of
https://github.com/codeplea/genann
synced 2024-11-21 14:11:17 +03:00
Initial commit
This commit is contained in:
commit
850f080045
22
LICENSE.md
Normal file
22
LICENSE.md
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
GENANN - Minimal C Artificial Neural Network
|
||||||
|
|
||||||
|
Copyright (c) 2015, 2016 Lewis Van Winkle
|
||||||
|
|
||||||
|
http://CodePlea.com
|
||||||
|
|
||||||
|
This software is provided 'as-is', without any express or implied
|
||||||
|
warranty. In no event will the authors be held liable for any damages
|
||||||
|
arising from the use of this software.
|
||||||
|
|
||||||
|
Permission is granted to anyone to use this software for any purpose,
|
||||||
|
including commercial applications, and to alter it and redistribute it
|
||||||
|
freely, subject to the following restrictions:
|
||||||
|
|
||||||
|
1. The origin of this software must not be misrepresented; you must not
|
||||||
|
claim that you wrote the original software. If you use this software
|
||||||
|
in a product, an acknowledgement in the product documentation would be
|
||||||
|
appreciated but is not required.
|
||||||
|
2. Altered source versions must be plainly marked as such, and must not be
|
||||||
|
misrepresented as being the original software.
|
||||||
|
3. This notice may not be removed or altered from any source distribution.
|
||||||
|
|
33
Makefile
Normal file
33
Makefile
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
CC = gcc
|
||||||
|
CCFLAGS = -ansi -Wall -Wshadow -O2 -g
|
||||||
|
LFLAGS = -lm
|
||||||
|
|
||||||
|
|
||||||
|
all: test example1 example2 example3 example4
|
||||||
|
|
||||||
|
|
||||||
|
test: test.o genann.o
|
||||||
|
$(CC) $(CCFLAGS) -o $@ $^ $(LFLAGS)
|
||||||
|
./$@
|
||||||
|
|
||||||
|
|
||||||
|
example1: example1.o genann.o
|
||||||
|
$(CC) $(CCFLAGS) -o $@ $^ $(LFLAGS)
|
||||||
|
|
||||||
|
example2: example2.o genann.o
|
||||||
|
$(CC) $(CCFLAGS) -o $@ $^ $(LFLAGS)
|
||||||
|
|
||||||
|
example3: example3.o genann.o
|
||||||
|
$(CC) $(CCFLAGS) -o $@ $^ $(LFLAGS)
|
||||||
|
|
||||||
|
example4: example4.o genann.o
|
||||||
|
$(CC) $(CCFLAGS) -o $@ $^ $(LFLAGS)
|
||||||
|
|
||||||
|
.c.o:
|
||||||
|
$(CC) -c $(CCFLAGS) $< -o $@
|
||||||
|
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm *.o
|
||||||
|
rm *.exe
|
||||||
|
rm persist.txt
|
133
README.md
Normal file
133
README.md
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
#GENANN
|
||||||
|
|
||||||
|
GENANN is a very minimal library for training and using feedforward artificial neural
|
||||||
|
networks (ANN) in C. Its primary focus is on being simple, fast, and hackable. It achieves
|
||||||
|
this by providing only the necessary functions and little extra.
|
||||||
|
|
||||||
|
##Features
|
||||||
|
|
||||||
|
- **ANSI C with no dependencies**.
|
||||||
|
- Contained in a single source code and header file.
|
||||||
|
- Simple.
|
||||||
|
- Fast and thread-safe.
|
||||||
|
- Easily extendible.
|
||||||
|
- Implements backpropagation training.
|
||||||
|
- Compatible with training by alternative methods (classic optimization, genetic algorithms, etc)
|
||||||
|
- Includes examples and test suite.
|
||||||
|
- Released under the zlib license - free for nearly any use.
|
||||||
|
|
||||||
|
##Example Code
|
||||||
|
|
||||||
|
Four example programs are included.
|
||||||
|
|
||||||
|
- `example1.c` - Trains an ANN on the XOR function using backpropagation.
|
||||||
|
- `example2.c` - Trains an ANN on the XOR function using random search.
|
||||||
|
- `example3.c` - Loads and runs an ANN from a file.
|
||||||
|
- `example4.c` - Trains an ANN on the [IRIS data-set](https://archive.ics.uci.edu/ml/datasets/Iris) using backpropagation.
|
||||||
|
|
||||||
|
##Quick Example
|
||||||
|
|
||||||
|
Here we create an ANN, train it on a set of labeled data using backpropagation,
|
||||||
|
ask it to predict on a test data point, and then free it:
|
||||||
|
|
||||||
|
```C
|
||||||
|
#include "genann.h"
|
||||||
|
|
||||||
|
/* New network with 5 inputs,
|
||||||
|
* 2 hidden layer of 10 neurons each,
|
||||||
|
* and 1 output. */
|
||||||
|
GENANN *ann = genann_init(5, 2, 10, 1);
|
||||||
|
|
||||||
|
/* Learn on the training set. */
|
||||||
|
for (i = 0; i < 300; ++i) {
|
||||||
|
for (j = 0; j < 100; ++j)
|
||||||
|
genann_train(ann, training_data_input[j], training_data_output[j], 0.1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Run the network and see what it predicts. */
|
||||||
|
printf("Output for the first test data point is: %f\n", *genann_run(ann, test_data_input[0]));
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
```
|
||||||
|
|
||||||
|
Not that this example is to show API usage, it is not showing good machine
|
||||||
|
learning techniques. In a real application you would likely want to learn on
|
||||||
|
the test data in a random order. You would also want to monitor the learning to
|
||||||
|
prevent over-fitting.
|
||||||
|
|
||||||
|
|
||||||
|
##Usage
|
||||||
|
|
||||||
|
###Creating and Freeing ANNs
|
||||||
|
```C
|
||||||
|
GENANN *genann_init(int inputs, int hidden_layers, int hidden, int outputs);
|
||||||
|
GENANN *genann_copy(GENANN const *ann);
|
||||||
|
void genann_free(GENANN *ann);
|
||||||
|
```
|
||||||
|
|
||||||
|
Creating a new ANN is done with the `genann_init()` function. It's arguments
|
||||||
|
are the number of inputs, the number of hidden layers, the number of neurons in
|
||||||
|
each hidden layer, and the number of outputs. It returns a `GENANN` struct pointer.
|
||||||
|
|
||||||
|
Calling `genann_copy()` will create a deep-copy of an existing GENANN struct.
|
||||||
|
|
||||||
|
Call `genann_free()` when you're finished with an ANN returned by `genann_init()`.
|
||||||
|
|
||||||
|
|
||||||
|
###Training ANNs
|
||||||
|
```C
|
||||||
|
void genann_train(GENANN const *ann, double const *inputs, double const *desired_outputs, double learning_rate);
|
||||||
|
```
|
||||||
|
|
||||||
|
`genann_train()` will preform one update using standard backpropogation. It
|
||||||
|
should be called by passing in an array of inputs, an array of expected output,
|
||||||
|
and a learning rate. See *example1.c* for an example of learning with
|
||||||
|
backpropogation.
|
||||||
|
|
||||||
|
A primary design goal of GENANN was to store all the network weights in one
|
||||||
|
contigious block of memory. This makes it easy and efficient to train the
|
||||||
|
network weights directly using direct-search numeric optimizion algorthims,
|
||||||
|
such as [Hill Climbing](https://en.wikipedia.org/wiki/Hill_climbing),
|
||||||
|
[the Genetic Algorithm](https://en.wikipedia.org/wiki/Genetic_algorithm), [Simulated
|
||||||
|
Annealing](https://en.wikipedia.org/wiki/Simulated_annealing), etc.
|
||||||
|
These methods can be used by searching on the ANN's weights directly.
|
||||||
|
Every `GENANN` struct contains the members `int total_weights;` and
|
||||||
|
`double *weight;`. `*weight` points to an array of `total_weights`
|
||||||
|
size which contains all weights used by the ANN. See *example2.c* for
|
||||||
|
an example of training using random hill climbing search.
|
||||||
|
|
||||||
|
###Saving and Loading ANNs
|
||||||
|
|
||||||
|
```C
|
||||||
|
GENANN *genann_read(FILE *in);
|
||||||
|
void genann_write(GENANN const *ann, FILE *out);
|
||||||
|
```
|
||||||
|
|
||||||
|
GENANN provides the `genann_read()` and `genann_write()` functions for loading or saving an ANN in a text-based format.
|
||||||
|
|
||||||
|
###Evaluating
|
||||||
|
|
||||||
|
```C
|
||||||
|
double const *genann_run(GENANN const *ann, double const *inputs);
|
||||||
|
```
|
||||||
|
|
||||||
|
Call `genann_run()` on a trained ANN to run a feed-forward pass on a given set of inputs. `genann_run()`
|
||||||
|
will provide a pointer to the array of predicted outputs (of `ann->outputs` length).
|
||||||
|
|
||||||
|
##Extra Resources
|
||||||
|
|
||||||
|
The [comp.ai.neural-nets
|
||||||
|
FAQ](http://www.faqs.org/faqs/ai-faq/neural-nets/part1/) is an excellent
|
||||||
|
resource for an introduction to artificial neural networks.
|
||||||
|
|
||||||
|
If you're looking for a heavier, more opinionated neural network library in C,
|
||||||
|
I highly recommend the [FANN library](http://leenissen.dk/fann/wp/). Another
|
||||||
|
good library is Peter van Rossum's [Lightweight Neural
|
||||||
|
Network](http://lwneuralnet.sourceforge.net/), which despite its name, is
|
||||||
|
heavier and has more features than GENANN.
|
||||||
|
|
||||||
|
##Hints
|
||||||
|
|
||||||
|
- All functions start with `genann_`.
|
||||||
|
- The code is simple. Dig in and change things.
|
||||||
|
|
150
example/iris.data
Normal file
150
example/iris.data
Normal file
@ -0,0 +1,150 @@
|
|||||||
|
5.1,3.5,1.4,0.2,Iris-setosa
|
||||||
|
4.9,3.0,1.4,0.2,Iris-setosa
|
||||||
|
4.7,3.2,1.3,0.2,Iris-setosa
|
||||||
|
4.6,3.1,1.5,0.2,Iris-setosa
|
||||||
|
5.0,3.6,1.4,0.2,Iris-setosa
|
||||||
|
5.4,3.9,1.7,0.4,Iris-setosa
|
||||||
|
4.6,3.4,1.4,0.3,Iris-setosa
|
||||||
|
5.0,3.4,1.5,0.2,Iris-setosa
|
||||||
|
4.4,2.9,1.4,0.2,Iris-setosa
|
||||||
|
4.9,3.1,1.5,0.1,Iris-setosa
|
||||||
|
5.4,3.7,1.5,0.2,Iris-setosa
|
||||||
|
4.8,3.4,1.6,0.2,Iris-setosa
|
||||||
|
4.8,3.0,1.4,0.1,Iris-setosa
|
||||||
|
4.3,3.0,1.1,0.1,Iris-setosa
|
||||||
|
5.8,4.0,1.2,0.2,Iris-setosa
|
||||||
|
5.7,4.4,1.5,0.4,Iris-setosa
|
||||||
|
5.4,3.9,1.3,0.4,Iris-setosa
|
||||||
|
5.1,3.5,1.4,0.3,Iris-setosa
|
||||||
|
5.7,3.8,1.7,0.3,Iris-setosa
|
||||||
|
5.1,3.8,1.5,0.3,Iris-setosa
|
||||||
|
5.4,3.4,1.7,0.2,Iris-setosa
|
||||||
|
5.1,3.7,1.5,0.4,Iris-setosa
|
||||||
|
4.6,3.6,1.0,0.2,Iris-setosa
|
||||||
|
5.1,3.3,1.7,0.5,Iris-setosa
|
||||||
|
4.8,3.4,1.9,0.2,Iris-setosa
|
||||||
|
5.0,3.0,1.6,0.2,Iris-setosa
|
||||||
|
5.0,3.4,1.6,0.4,Iris-setosa
|
||||||
|
5.2,3.5,1.5,0.2,Iris-setosa
|
||||||
|
5.2,3.4,1.4,0.2,Iris-setosa
|
||||||
|
4.7,3.2,1.6,0.2,Iris-setosa
|
||||||
|
4.8,3.1,1.6,0.2,Iris-setosa
|
||||||
|
5.4,3.4,1.5,0.4,Iris-setosa
|
||||||
|
5.2,4.1,1.5,0.1,Iris-setosa
|
||||||
|
5.5,4.2,1.4,0.2,Iris-setosa
|
||||||
|
4.9,3.1,1.5,0.1,Iris-setosa
|
||||||
|
5.0,3.2,1.2,0.2,Iris-setosa
|
||||||
|
5.5,3.5,1.3,0.2,Iris-setosa
|
||||||
|
4.9,3.1,1.5,0.1,Iris-setosa
|
||||||
|
4.4,3.0,1.3,0.2,Iris-setosa
|
||||||
|
5.1,3.4,1.5,0.2,Iris-setosa
|
||||||
|
5.0,3.5,1.3,0.3,Iris-setosa
|
||||||
|
4.5,2.3,1.3,0.3,Iris-setosa
|
||||||
|
4.4,3.2,1.3,0.2,Iris-setosa
|
||||||
|
5.0,3.5,1.6,0.6,Iris-setosa
|
||||||
|
5.1,3.8,1.9,0.4,Iris-setosa
|
||||||
|
4.8,3.0,1.4,0.3,Iris-setosa
|
||||||
|
5.1,3.8,1.6,0.2,Iris-setosa
|
||||||
|
4.6,3.2,1.4,0.2,Iris-setosa
|
||||||
|
5.3,3.7,1.5,0.2,Iris-setosa
|
||||||
|
5.0,3.3,1.4,0.2,Iris-setosa
|
||||||
|
7.0,3.2,4.7,1.4,Iris-versicolor
|
||||||
|
6.4,3.2,4.5,1.5,Iris-versicolor
|
||||||
|
6.9,3.1,4.9,1.5,Iris-versicolor
|
||||||
|
5.5,2.3,4.0,1.3,Iris-versicolor
|
||||||
|
6.5,2.8,4.6,1.5,Iris-versicolor
|
||||||
|
5.7,2.8,4.5,1.3,Iris-versicolor
|
||||||
|
6.3,3.3,4.7,1.6,Iris-versicolor
|
||||||
|
4.9,2.4,3.3,1.0,Iris-versicolor
|
||||||
|
6.6,2.9,4.6,1.3,Iris-versicolor
|
||||||
|
5.2,2.7,3.9,1.4,Iris-versicolor
|
||||||
|
5.0,2.0,3.5,1.0,Iris-versicolor
|
||||||
|
5.9,3.0,4.2,1.5,Iris-versicolor
|
||||||
|
6.0,2.2,4.0,1.0,Iris-versicolor
|
||||||
|
6.1,2.9,4.7,1.4,Iris-versicolor
|
||||||
|
5.6,2.9,3.6,1.3,Iris-versicolor
|
||||||
|
6.7,3.1,4.4,1.4,Iris-versicolor
|
||||||
|
5.6,3.0,4.5,1.5,Iris-versicolor
|
||||||
|
5.8,2.7,4.1,1.0,Iris-versicolor
|
||||||
|
6.2,2.2,4.5,1.5,Iris-versicolor
|
||||||
|
5.6,2.5,3.9,1.1,Iris-versicolor
|
||||||
|
5.9,3.2,4.8,1.8,Iris-versicolor
|
||||||
|
6.1,2.8,4.0,1.3,Iris-versicolor
|
||||||
|
6.3,2.5,4.9,1.5,Iris-versicolor
|
||||||
|
6.1,2.8,4.7,1.2,Iris-versicolor
|
||||||
|
6.4,2.9,4.3,1.3,Iris-versicolor
|
||||||
|
6.6,3.0,4.4,1.4,Iris-versicolor
|
||||||
|
6.8,2.8,4.8,1.4,Iris-versicolor
|
||||||
|
6.7,3.0,5.0,1.7,Iris-versicolor
|
||||||
|
6.0,2.9,4.5,1.5,Iris-versicolor
|
||||||
|
5.7,2.6,3.5,1.0,Iris-versicolor
|
||||||
|
5.5,2.4,3.8,1.1,Iris-versicolor
|
||||||
|
5.5,2.4,3.7,1.0,Iris-versicolor
|
||||||
|
5.8,2.7,3.9,1.2,Iris-versicolor
|
||||||
|
6.0,2.7,5.1,1.6,Iris-versicolor
|
||||||
|
5.4,3.0,4.5,1.5,Iris-versicolor
|
||||||
|
6.0,3.4,4.5,1.6,Iris-versicolor
|
||||||
|
6.7,3.1,4.7,1.5,Iris-versicolor
|
||||||
|
6.3,2.3,4.4,1.3,Iris-versicolor
|
||||||
|
5.6,3.0,4.1,1.3,Iris-versicolor
|
||||||
|
5.5,2.5,4.0,1.3,Iris-versicolor
|
||||||
|
5.5,2.6,4.4,1.2,Iris-versicolor
|
||||||
|
6.1,3.0,4.6,1.4,Iris-versicolor
|
||||||
|
5.8,2.6,4.0,1.2,Iris-versicolor
|
||||||
|
5.0,2.3,3.3,1.0,Iris-versicolor
|
||||||
|
5.6,2.7,4.2,1.3,Iris-versicolor
|
||||||
|
5.7,3.0,4.2,1.2,Iris-versicolor
|
||||||
|
5.7,2.9,4.2,1.3,Iris-versicolor
|
||||||
|
6.2,2.9,4.3,1.3,Iris-versicolor
|
||||||
|
5.1,2.5,3.0,1.1,Iris-versicolor
|
||||||
|
5.7,2.8,4.1,1.3,Iris-versicolor
|
||||||
|
6.3,3.3,6.0,2.5,Iris-virginica
|
||||||
|
5.8,2.7,5.1,1.9,Iris-virginica
|
||||||
|
7.1,3.0,5.9,2.1,Iris-virginica
|
||||||
|
6.3,2.9,5.6,1.8,Iris-virginica
|
||||||
|
6.5,3.0,5.8,2.2,Iris-virginica
|
||||||
|
7.6,3.0,6.6,2.1,Iris-virginica
|
||||||
|
4.9,2.5,4.5,1.7,Iris-virginica
|
||||||
|
7.3,2.9,6.3,1.8,Iris-virginica
|
||||||
|
6.7,2.5,5.8,1.8,Iris-virginica
|
||||||
|
7.2,3.6,6.1,2.5,Iris-virginica
|
||||||
|
6.5,3.2,5.1,2.0,Iris-virginica
|
||||||
|
6.4,2.7,5.3,1.9,Iris-virginica
|
||||||
|
6.8,3.0,5.5,2.1,Iris-virginica
|
||||||
|
5.7,2.5,5.0,2.0,Iris-virginica
|
||||||
|
5.8,2.8,5.1,2.4,Iris-virginica
|
||||||
|
6.4,3.2,5.3,2.3,Iris-virginica
|
||||||
|
6.5,3.0,5.5,1.8,Iris-virginica
|
||||||
|
7.7,3.8,6.7,2.2,Iris-virginica
|
||||||
|
7.7,2.6,6.9,2.3,Iris-virginica
|
||||||
|
6.0,2.2,5.0,1.5,Iris-virginica
|
||||||
|
6.9,3.2,5.7,2.3,Iris-virginica
|
||||||
|
5.6,2.8,4.9,2.0,Iris-virginica
|
||||||
|
7.7,2.8,6.7,2.0,Iris-virginica
|
||||||
|
6.3,2.7,4.9,1.8,Iris-virginica
|
||||||
|
6.7,3.3,5.7,2.1,Iris-virginica
|
||||||
|
7.2,3.2,6.0,1.8,Iris-virginica
|
||||||
|
6.2,2.8,4.8,1.8,Iris-virginica
|
||||||
|
6.1,3.0,4.9,1.8,Iris-virginica
|
||||||
|
6.4,2.8,5.6,2.1,Iris-virginica
|
||||||
|
7.2,3.0,5.8,1.6,Iris-virginica
|
||||||
|
7.4,2.8,6.1,1.9,Iris-virginica
|
||||||
|
7.9,3.8,6.4,2.0,Iris-virginica
|
||||||
|
6.4,2.8,5.6,2.2,Iris-virginica
|
||||||
|
6.3,2.8,5.1,1.5,Iris-virginica
|
||||||
|
6.1,2.6,5.6,1.4,Iris-virginica
|
||||||
|
7.7,3.0,6.1,2.3,Iris-virginica
|
||||||
|
6.3,3.4,5.6,2.4,Iris-virginica
|
||||||
|
6.4,3.1,5.5,1.8,Iris-virginica
|
||||||
|
6.0,3.0,4.8,1.8,Iris-virginica
|
||||||
|
6.9,3.1,5.4,2.1,Iris-virginica
|
||||||
|
6.7,3.1,5.6,2.4,Iris-virginica
|
||||||
|
6.9,3.1,5.1,2.3,Iris-virginica
|
||||||
|
5.8,2.7,5.1,1.9,Iris-virginica
|
||||||
|
6.8,3.2,5.9,2.3,Iris-virginica
|
||||||
|
6.7,3.3,5.7,2.5,Iris-virginica
|
||||||
|
6.7,3.0,5.2,2.3,Iris-virginica
|
||||||
|
6.3,2.5,5.0,1.9,Iris-virginica
|
||||||
|
6.5,3.0,5.2,2.0,Iris-virginica
|
||||||
|
6.2,3.4,5.4,2.3,Iris-virginica
|
||||||
|
5.9,3.0,5.1,1.8,Iris-virginica
|
69
example/iris.names
Normal file
69
example/iris.names
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
1. Title: Iris Plants Database
|
||||||
|
Updated Sept 21 by C.Blake - Added discrepency information
|
||||||
|
|
||||||
|
2. Sources:
|
||||||
|
(a) Creator: R.A. Fisher
|
||||||
|
(b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov)
|
||||||
|
(c) Date: July, 1988
|
||||||
|
|
||||||
|
3. Past Usage:
|
||||||
|
- Publications: too many to mention!!! Here are a few.
|
||||||
|
1. Fisher,R.A. "The use of multiple measurements in taxonomic problems"
|
||||||
|
Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions
|
||||||
|
to Mathematical Statistics" (John Wiley, NY, 1950).
|
||||||
|
2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis.
|
||||||
|
(Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218.
|
||||||
|
3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System
|
||||||
|
Structure and Classification Rule for Recognition in Partially Exposed
|
||||||
|
Environments". IEEE Transactions on Pattern Analysis and Machine
|
||||||
|
Intelligence, Vol. PAMI-2, No. 1, 67-71.
|
||||||
|
-- Results:
|
||||||
|
-- very low misclassification rates (0% for the setosa class)
|
||||||
|
4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE
|
||||||
|
Transactions on Information Theory, May 1972, 431-433.
|
||||||
|
-- Results:
|
||||||
|
-- very low misclassification rates again
|
||||||
|
5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II
|
||||||
|
conceptual clustering system finds 3 classes in the data.
|
||||||
|
|
||||||
|
4. Relevant Information:
|
||||||
|
--- This is perhaps the best known database to be found in the pattern
|
||||||
|
recognition literature. Fisher's paper is a classic in the field
|
||||||
|
and is referenced frequently to this day. (See Duda & Hart, for
|
||||||
|
example.) The data set contains 3 classes of 50 instances each,
|
||||||
|
where each class refers to a type of iris plant. One class is
|
||||||
|
linearly separable from the other 2; the latter are NOT linearly
|
||||||
|
separable from each other.
|
||||||
|
--- Predicted attribute: class of iris plant.
|
||||||
|
--- This is an exceedingly simple domain.
|
||||||
|
--- This data differs from the data presented in Fishers article
|
||||||
|
(identified by Steve Chadwick, spchadwick@espeedaz.net )
|
||||||
|
The 35th sample should be: 4.9,3.1,1.5,0.2,"Iris-setosa"
|
||||||
|
where the error is in the fourth feature.
|
||||||
|
The 38th sample: 4.9,3.6,1.4,0.1,"Iris-setosa"
|
||||||
|
where the errors are in the second and third features.
|
||||||
|
|
||||||
|
5. Number of Instances: 150 (50 in each of three classes)
|
||||||
|
|
||||||
|
6. Number of Attributes: 4 numeric, predictive attributes and the class
|
||||||
|
|
||||||
|
7. Attribute Information:
|
||||||
|
1. sepal length in cm
|
||||||
|
2. sepal width in cm
|
||||||
|
3. petal length in cm
|
||||||
|
4. petal width in cm
|
||||||
|
5. class:
|
||||||
|
-- Iris Setosa
|
||||||
|
-- Iris Versicolour
|
||||||
|
-- Iris Virginica
|
||||||
|
|
||||||
|
8. Missing Attribute Values: None
|
||||||
|
|
||||||
|
Summary Statistics:
|
||||||
|
Min Max Mean SD Class Correlation
|
||||||
|
sepal length: 4.3 7.9 5.84 0.83 0.7826
|
||||||
|
sepal width: 2.0 4.4 3.05 0.43 -0.4194
|
||||||
|
petal length: 1.0 6.9 3.76 1.76 0.9490 (high!)
|
||||||
|
petal width: 0.1 2.5 1.20 0.76 0.9565 (high!)
|
||||||
|
|
||||||
|
9. Class Distribution: 33.3% for each of 3 classes.
|
1
example/xor.ann
Normal file
1
example/xor.ann
Normal file
@ -0,0 +1 @@
|
|||||||
|
2 1 2 1 -1.777 -5.734 -6.029 -4.460 -3.261 -3.172 2.444 -6.581 5.826
|
35
example1.c
Normal file
35
example1.c
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
#include <stdio.h>
|
||||||
|
#include "genann.h"
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
printf("GENANN example 1.\n");
|
||||||
|
printf("Train a small ANN to the XOR function using backpropagation.\n");
|
||||||
|
|
||||||
|
/* Input and expected out data for the XOR function. */
|
||||||
|
const double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
|
||||||
|
const double output[4] = {0, 1, 1, 0};
|
||||||
|
int i;
|
||||||
|
|
||||||
|
/* New network with 2 inputs,
|
||||||
|
* 1 hidden layer of 2 neurons,
|
||||||
|
* and 1 output. */
|
||||||
|
GENANN *ann = genann_init(2, 1, 2, 1);
|
||||||
|
|
||||||
|
/* Train on the four labeled data points many times. */
|
||||||
|
for (i = 0; i < 300; ++i) {
|
||||||
|
genann_train(ann, input[0], output + 0, 3);
|
||||||
|
genann_train(ann, input[1], output + 1, 3);
|
||||||
|
genann_train(ann, input[2], output + 2, 3);
|
||||||
|
genann_train(ann, input[3], output + 3, 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Run the network and see what it predicts. */
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[0][0], input[0][1], *genann_run(ann, input[0]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[1][0], input[1][1], *genann_run(ann, input[1]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[2][0], input[2][1], *genann_run(ann, input[2]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[3][0], input[3][1], *genann_run(ann, input[3]));
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
return 0;
|
||||||
|
}
|
67
example2.c
Normal file
67
example2.c
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include "genann.h"
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
printf("GENANN example 2.\n");
|
||||||
|
printf("Train a small ANN to the XOR function using random search.\n");
|
||||||
|
|
||||||
|
/* Input and expected out data for the XOR function. */
|
||||||
|
const double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
|
||||||
|
const double output[4] = {0, 1, 1, 0};
|
||||||
|
int i;
|
||||||
|
|
||||||
|
/* New network with 2 inputs,
|
||||||
|
* 1 hidden layer of 2 neurons,
|
||||||
|
* and 1 output. */
|
||||||
|
GENANN *ann = genann_init(2, 1, 2, 1);
|
||||||
|
|
||||||
|
double err;
|
||||||
|
double last_err = 1000;
|
||||||
|
int count = 0;
|
||||||
|
|
||||||
|
do {
|
||||||
|
++count;
|
||||||
|
if (count % 1000 == 0) {
|
||||||
|
/* We're stuck, start over. */
|
||||||
|
genann_randomize(ann);
|
||||||
|
}
|
||||||
|
|
||||||
|
GENANN *save = genann_copy(ann);
|
||||||
|
|
||||||
|
/* Take a random guess at the ANN weights. */
|
||||||
|
for (i = 0; i < ann->total_weights; ++i) {
|
||||||
|
ann->weight[i] += ((double)rand())/RAND_MAX-0.5;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* See how we did. */
|
||||||
|
err = 0;
|
||||||
|
err += pow(*genann_run(ann, input[0]) - output[0], 2.0);
|
||||||
|
err += pow(*genann_run(ann, input[1]) - output[1], 2.0);
|
||||||
|
err += pow(*genann_run(ann, input[2]) - output[2], 2.0);
|
||||||
|
err += pow(*genann_run(ann, input[3]) - output[3], 2.0);
|
||||||
|
|
||||||
|
/* Keep these weights if they're an improvement. */
|
||||||
|
if (err < last_err) {
|
||||||
|
genann_free(save);
|
||||||
|
last_err = err;
|
||||||
|
} else {
|
||||||
|
genann_free(ann);
|
||||||
|
ann = save;
|
||||||
|
}
|
||||||
|
|
||||||
|
} while (err > 0.01);
|
||||||
|
|
||||||
|
printf("Finished in %d loops.\n", count);
|
||||||
|
|
||||||
|
/* Run the network and see what it predicts. */
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[0][0], input[0][1], *genann_run(ann, input[0]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[1][0], input[1][1], *genann_run(ann, input[1]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[2][0], input[2][1], *genann_run(ann, input[2]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[3][0], input[3][1], *genann_run(ann, input[3]));
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
return 0;
|
||||||
|
}
|
39
example3.c
Normal file
39
example3.c
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include "genann.h"
|
||||||
|
|
||||||
|
const char *save_name = "example/xor.ann";
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
printf("GENANN example 3.\n");
|
||||||
|
printf("Load a saved ANN to solve the XOR function.\n");
|
||||||
|
|
||||||
|
|
||||||
|
FILE *saved = fopen(save_name, "r");
|
||||||
|
if (!saved) {
|
||||||
|
printf("Couldn't open file: %s\n", save_name);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
GENANN *ann = genann_read(saved);
|
||||||
|
fclose(saved);
|
||||||
|
|
||||||
|
if (!ann) {
|
||||||
|
printf("Error loading ANN from file.", save_name);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Input data for the XOR function. */
|
||||||
|
const double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
|
||||||
|
|
||||||
|
/* Run the network and see what it predicts. */
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[0][0], input[0][1], *genann_run(ann, input[0]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[1][0], input[1][1], *genann_run(ann, input[1]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[2][0], input[2][1], *genann_run(ann, input[2]));
|
||||||
|
printf("Output for [%1.f, %1.f] is %1.f.\n", input[3][0], input[3][1], *genann_run(ann, input[3]));
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
return 0;
|
||||||
|
}
|
111
example4.c
Normal file
111
example4.c
Normal file
@ -0,0 +1,111 @@
|
|||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include "genann.h"
|
||||||
|
|
||||||
|
/* This example is to illustrate how to use GENANN.
|
||||||
|
* It is NOT an example of good machine learning techniques.
|
||||||
|
*/
|
||||||
|
|
||||||
|
const char *iris_data = "example/iris.data";
|
||||||
|
|
||||||
|
double *input, *class;
|
||||||
|
int samples;
|
||||||
|
const char *class_names[] = {"Iris-setosa", "Iris-versicolor", "Iris-virginica"};
|
||||||
|
|
||||||
|
void load_data() {
|
||||||
|
/* Load the iris data-set. */
|
||||||
|
FILE *in = fopen("example/iris.data", "r");
|
||||||
|
if (!in) {
|
||||||
|
printf("Could not open file: %s\n", iris_data);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Loop through the data to get a count. */
|
||||||
|
char line[1024];
|
||||||
|
while (!feof(in) && fgets(line, 1024, in)) {
|
||||||
|
++samples;
|
||||||
|
}
|
||||||
|
fseek(in, 0, SEEK_SET);
|
||||||
|
|
||||||
|
printf("Loading %d data points from %s\n", samples, iris_data);
|
||||||
|
|
||||||
|
/* Allocate memory for input and output data. */
|
||||||
|
input = malloc(sizeof(double) * samples * 4);
|
||||||
|
class = malloc(sizeof(double) * samples * 3);
|
||||||
|
|
||||||
|
/* Read the file into our arrays. */
|
||||||
|
int i, j;
|
||||||
|
for (i = 0; i < samples; ++i) {
|
||||||
|
double *p = input + i * 4;
|
||||||
|
double *c = class + i * 3;
|
||||||
|
c[0] = c[1] = c[2] = 0.0;
|
||||||
|
|
||||||
|
fgets(line, 1024, in);
|
||||||
|
|
||||||
|
char *split = strtok(line, ",");
|
||||||
|
for (j = 0; j < 4; ++j) {
|
||||||
|
p[j] = atof(split);
|
||||||
|
split = strtok(0, ",");
|
||||||
|
}
|
||||||
|
|
||||||
|
split[strlen(split)-1] = 0;
|
||||||
|
if (strcmp(split, class_names[0]) == 0) {c[0] = 1.0;}
|
||||||
|
else if (strcmp(split, class_names[1]) == 0) {c[1] = 1.0;}
|
||||||
|
else if (strcmp(split, class_names[2]) == 0) {c[2] = 1.0;}
|
||||||
|
else {
|
||||||
|
printf("Unknown class %s.\n", split);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* printf("Data point %d is %f %f %f %f -> %f %f %f\n", i, p[0], p[1], p[2], p[3], c[0], c[1], c[2]); */
|
||||||
|
}
|
||||||
|
|
||||||
|
fclose(in);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
printf("GENANN example 4.\n");
|
||||||
|
printf("Train an ANN on the IRIS dataset using backpropagation.\n");
|
||||||
|
|
||||||
|
/* Load the data from file. */
|
||||||
|
load_data();
|
||||||
|
|
||||||
|
/* 4 inputs.
|
||||||
|
* 1 hidden layer(s) of 4 neurons.
|
||||||
|
* 3 outputs (1 per class)
|
||||||
|
*/
|
||||||
|
GENANN *ann = genann_init(4, 1, 4, 3);
|
||||||
|
|
||||||
|
int i, j;
|
||||||
|
int loops = 5000;
|
||||||
|
|
||||||
|
/* Train the network with backpropagation. */
|
||||||
|
printf("Training for %d loops over data.\n", loops);
|
||||||
|
for (i = 0; i < loops; ++i) {
|
||||||
|
for (j = 0; j < samples; ++j) {
|
||||||
|
genann_train(ann, input + j*4, class + j*3, .01);
|
||||||
|
}
|
||||||
|
/* printf("%1.2f ", xor_score(ann)); */
|
||||||
|
}
|
||||||
|
|
||||||
|
int correct = 0;
|
||||||
|
for (j = 0; j < samples; ++j) {
|
||||||
|
const double *guess = genann_run(ann, input + j*4);
|
||||||
|
if (class[j*3+0] == 1.0) {if (guess[0] > guess[1] && guess[0] > guess[2]) ++correct;}
|
||||||
|
else if (class[j*3+1] == 1.0) {if (guess[1] > guess[0] && guess[1] > guess[2]) ++correct;}
|
||||||
|
else if (class[j*3+2] == 1.0) {if (guess[2] > guess[0] && guess[2] > guess[1]) ++correct;}
|
||||||
|
else {printf("Logic error.\n"); exit(1);}
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%d/%d correct (%0.1f%%).\n", correct, samples, (double)correct / samples * 100.0);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
346
genann.c
Normal file
346
genann.c
Normal file
@ -0,0 +1,346 @@
|
|||||||
|
/*
|
||||||
|
* GENANN - Minimal C Artificial Neural Network
|
||||||
|
*
|
||||||
|
* Copyright (c) 2015, 2016 Lewis Van Winkle
|
||||||
|
*
|
||||||
|
* http://CodePlea.com
|
||||||
|
*
|
||||||
|
* This software is provided 'as-is', without any express or implied
|
||||||
|
* warranty. In no event will the authors be held liable for any damages
|
||||||
|
* arising from the use of this software.
|
||||||
|
*
|
||||||
|
* Permission is granted to anyone to use this software for any purpose,
|
||||||
|
* including commercial applications, and to alter it and redistribute it
|
||||||
|
* freely, subject to the following restrictions:
|
||||||
|
*
|
||||||
|
* 1. The origin of this software must not be misrepresented; you must not
|
||||||
|
* claim that you wrote the original software. If you use this software
|
||||||
|
* in a product, an acknowledgement in the product documentation would be
|
||||||
|
* appreciated but is not required.
|
||||||
|
* 2. Altered source versions must be plainly marked as such, and must not be
|
||||||
|
* misrepresented as being the original software.
|
||||||
|
* 3. This notice may not be removed or altered from any source distribution.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "genann.h"
|
||||||
|
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <assert.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
#define LOOKUP_SIZE 4096
|
||||||
|
|
||||||
|
double genann_act_sigmoid(double a) {
|
||||||
|
if (a < -45.0) return 0;
|
||||||
|
if (a > 45.0) return 1;
|
||||||
|
return 1.0 / (1 + exp(-a));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
double genann_act_sigmoid_cached(double a) {
|
||||||
|
/* If you're optimizing for memory usage, just
|
||||||
|
* delete this entire function and replace references
|
||||||
|
* of genann_act_sigmoid_cached to genann_act_sigmoid
|
||||||
|
*/
|
||||||
|
const double min = -15.0;
|
||||||
|
const double max = 15.0;
|
||||||
|
static double interval;
|
||||||
|
static int initialized = 0;
|
||||||
|
static double lookup[LOOKUP_SIZE];
|
||||||
|
|
||||||
|
/* Calculate entire lookup table on first run. */
|
||||||
|
if (!initialized) {
|
||||||
|
interval = (max - min) / LOOKUP_SIZE;
|
||||||
|
int i;
|
||||||
|
for (i = 0; i < LOOKUP_SIZE; ++i) {
|
||||||
|
lookup[i] = genann_act_sigmoid(min + interval * i);
|
||||||
|
}
|
||||||
|
/* This is down here to make this thread safe. */
|
||||||
|
initialized = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int i;
|
||||||
|
i = (int)((a-min)/interval+0.5);
|
||||||
|
if (i <= 0) return lookup[0];
|
||||||
|
if (i >= LOOKUP_SIZE) return lookup[LOOKUP_SIZE-1];
|
||||||
|
return lookup[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
double genann_act_threshold(double a) {
|
||||||
|
return a > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
GENANN *genann_init(int inputs, int hidden_layers, int hidden, int outputs) {
    /* Allocate a new network as one contiguous block: the GENANN header
     * followed by the weight, output, and delta arrays.  Returns NULL (0)
     * on invalid arguments or allocation failure.  Caller frees with
     * genann_free(). */
    if (hidden_layers < 0) return 0;
    if (inputs < 1) return 0;
    if (outputs < 1) return 0;
    if (hidden_layers > 0 && hidden < 1) return 0;

    /* Each neuron carries one weight per incoming value plus one bias
     * weight (hence the +1 terms).  First hidden layer is fed by the
     * inputs; subsequent hidden layers are fed by the previous hidden
     * layer. */
    const int hidden_weights = hidden_layers ? (inputs+1) * hidden + (hidden_layers-1) * (hidden+1) * hidden : 0;
    const int output_weights = (hidden_layers ? (hidden+1) : (inputs+1)) * outputs;
    const int total_weights = (hidden_weights + output_weights);

    /* Inputs are counted as neurons too; their "outputs" are the raw
     * input values copied in by genann_run. */
    const int total_neurons = (inputs + hidden * hidden_layers + outputs);

    /* Allocate extra size for weights, outputs, and deltas.
     * Deltas exist only for hidden and output neurons, hence
     * (total_neurons - inputs).  NOTE(review): this layout must stay in
     * sync with the identical computation in genann_copy. */
    const int size = sizeof(GENANN) + sizeof(double) * (total_weights + total_neurons + (total_neurons - inputs));
    GENANN *ret = malloc(size);
    if (!ret) return 0;

    ret->inputs = inputs;
    ret->hidden_layers = hidden_layers;
    ret->hidden = hidden;
    ret->outputs = outputs;

    ret->total_weights = total_weights;
    ret->total_neurons = total_neurons;

    /* Set pointers.  All three arrays live inside the single malloc'd
     * block, immediately after the struct itself. */
    ret->weight = (double*)((char*)ret + sizeof(GENANN));
    ret->output = ret->weight + ret->total_weights;
    ret->delta = ret->output + ret->total_neurons;

    genann_randomize(ret);

    ret->activation_hidden = genann_act_sigmoid_cached;
    ret->activation_output = genann_act_sigmoid_cached;

    return ret;
}
|
||||||
|
|
||||||
|
|
||||||
|
GENANN *genann_read(FILE *in) {
|
||||||
|
int inputs, hidden_layers, hidden, outputs;
|
||||||
|
fscanf(in, "%d %d %d %d", &inputs, &hidden_layers, &hidden, &outputs);
|
||||||
|
|
||||||
|
GENANN *ann = genann_init(inputs, hidden_layers, hidden, outputs);
|
||||||
|
|
||||||
|
int i;
|
||||||
|
for (i = 0; i < ann->total_weights; ++i) {
|
||||||
|
fscanf(in, " %le", ann->weight + i);
|
||||||
|
}
|
||||||
|
|
||||||
|
return ann;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
GENANN *genann_copy(GENANN const *ann) {
    /* Deep-copy a network.  Because the whole network lives in one
     * contiguous allocation, a single memcpy duplicates the struct and
     * all three arrays; only the internal pointers then need rebasing.
     * Size formula must match genann_init exactly.  Returns NULL (0) on
     * allocation failure; caller frees with genann_free(). */
    const int size = sizeof(GENANN) + sizeof(double) * (ann->total_weights + ann->total_neurons + (ann->total_neurons - ann->inputs));
    GENANN *ret = malloc(size);
    if (!ret) return 0;

    memcpy(ret, ann, size);

    /* Set pointers.  The copied pointers still reference the source
     * buffer, so re-point them into the new block. */
    ret->weight = (double*)((char*)ret + sizeof(GENANN));
    ret->output = ret->weight + ret->total_weights;
    ret->delta = ret->output + ret->total_neurons;

    return ret;
}
|
||||||
|
|
||||||
|
|
||||||
|
void genann_randomize(GENANN *ann) {
    /* Re-seed every weight with a uniform random value in [-0.5, 0.5].
     * GENANN_RANDOM() yields a value in [0, 1]. */
    int idx;

    for (idx = 0; idx < ann->total_weights; ++idx) {
        ann->weight[idx] = GENANN_RANDOM() - 0.5;
    }
}
|
||||||
|
|
||||||
|
|
||||||
|
void genann_free(GENANN *ann) {
    /* The weight, output, and delta pointers go to the same buffer. */
    /* The network is one allocation (see genann_init/genann_copy), so a
     * single free releases everything.  free(NULL) is a safe no-op. */
    free(ann);
}
|
||||||
|
|
||||||
|
|
||||||
|
double const *genann_run(GENANN const *ann, double const *inputs) {
    /* Feed-forward pass.  Returns a pointer to the first of the
     * ann->outputs output values, which live inside ann->output (valid
     * until the next run/train/free of this network). */
    double const *w = ann->weight;    /* Walks every weight exactly once. */
    double *o = ann->output + ann->inputs; /* Where the next neuron's output is written. */
    double const *i = ann->output;    /* Start of the current layer's inputs. */

    /* Copy the inputs to the scratch area, where we also store each neuron's
     * output, for consistency. This way the first layer isn't a special case. */
    memcpy(ann->output, inputs, sizeof(double) * ann->inputs);

    int h, j, k;

    const GENANN_ACTFUN act = ann->activation_hidden;
    const GENANN_ACTFUN acto = ann->activation_output;

    /* Figure hidden layers, if any. */
    for (h = 0; h < ann->hidden_layers; ++h) {
        for (j = 0; j < ann->hidden; ++j) {
            double sum = 0;
            /* k == 0 is the bias weight, applied to a constant -1 input. */
            for (k = 0; k < (h == 0 ? ann->inputs : ann->hidden) + 1; ++k) {
                if (k == 0) {
                    sum += *w++ * -1.0;
                } else {
                    sum += *w++ * i[k-1];
                }
            }
            *o++ = act(sum);
        }

        /* Advance the input window to this layer's freshly written outputs. */
        i += (h == 0 ? ann->inputs : ann->hidden);
    }

    /* The output neurons' values start here. */
    double const *ret = o;

    /* Figure output layer. */
    for (j = 0; j < ann->outputs; ++j) {
        double sum = 0;
        for (k = 0; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; ++k) {
            if (k == 0) {
                sum += *w++ * -1.0; /* Bias term. */
            } else {
                sum += *w++ * i[k-1];
            }
        }
        *o++ = acto(sum);
    }

    /* Sanity check that we used all weights and wrote all outputs. */
    assert(w - ann->weight == ann->total_weights);
    assert(o - ann->output == ann->total_neurons);

    return ret;
}
|
||||||
|
|
||||||
|
|
||||||
|
void genann_train(GENANN const *ann, double const *inputs, double const *desired_outputs, double learning_rate) {
    /* One online backpropagation step: forward pass, delta computation
     * (output layer, then hidden layers back-to-front), then weight
     * updates.  NOTE(review): the delta formulas hard-code o*(1-o), the
     * derivative of the logistic sigmoid (the default activation); other
     * activation functions will not train correctly. */
    /* To begin with, we must run the network forward. */
    genann_run(ann, inputs);

    int h, j, k;

    /* First set the output layer deltas. */
    {
        double const *o = ann->output + ann->inputs + ann->hidden * ann->hidden_layers; /* First output. */
        double *d = ann->delta + ann->hidden * ann->hidden_layers; /* First delta. */
        double const *t = desired_outputs; /* First desired output. */

        /* Set output layer deltas: (target - output) * sigmoid'. */
        for (j = 0; j < ann->outputs; ++j) {
            *d = (*t - *o) * *o * (1.0 - *o);
            ++o; ++d; ++t;
        }
    }

    /* Set hidden layer deltas, start on last layer and work backwards. */
    /* Note that loop is skipped in the case of hidden_layers == 0. */
    for (h = ann->hidden_layers - 1; h >= 0; --h) {

        /* Find first output and delta in this layer. */
        double const *o = ann->output + ann->inputs + (h * ann->hidden);
        double *d = ann->delta + (h * ann->hidden);

        /* Find first delta in following layer (which may be hidden or output). */
        double const * const dd = ann->delta + ((h+1) * ann->hidden);

        /* Find first weight in following layer (which may be hidden or output). */
        double const * const ww = ann->weight + ((ann->inputs+1) * ann->hidden) + ((ann->hidden+1) * ann->hidden * (h));

        for (j = 0; j < ann->hidden; ++j) {

            double delta = 0;

            /* Sum each downstream neuron's delta weighted by the
             * connection from this neuron (j+1 skips the bias weight). */
            for (k = 0; k < (h == ann->hidden_layers-1 ? ann->outputs : ann->hidden); ++k) {
                const double forward_delta = dd[k];
                const int windex = k * (ann->hidden + 1) + (j + 1);
                const double forward_weight = ww[windex];
                delta += forward_delta * forward_weight;
            }

            *d = *o * (1.0-*o) * delta;
            ++d; ++o;
        }
    }

    /* Train the outputs. */
    {
        /* Find first output delta. */
        double const *d = ann->delta + ann->hidden * ann->hidden_layers; /* First output delta. */

        /* Find first weight to first output delta. */
        double *w = ann->weight + (ann->hidden_layers
                ? ((ann->inputs+1) * ann->hidden + (ann->hidden+1) * ann->hidden * (ann->hidden_layers-1))
                : (0));

        /* Find first output in previous layer. */
        double const * const i = ann->output + (ann->hidden_layers
                ? (ann->inputs + (ann->hidden) * (ann->hidden_layers-1))
                : 0);

        /* Update output layer weights; k == 0 is the bias weight whose
         * input is the constant -1 (matching genann_run). */
        for (j = 0; j < ann->outputs; ++j) {
            for (k = 0; k < (ann->hidden_layers ? ann->hidden : ann->inputs) + 1; ++k) {
                if (k == 0) {
                    *w++ += *d * learning_rate * -1.0;
                } else {
                    *w++ += *d * learning_rate * i[k-1];
                }
            }

            ++d;
        }

        assert(w - ann->weight == ann->total_weights);
    }

    /* Train the hidden layers. */
    for (h = ann->hidden_layers - 1; h >= 0; --h) {

        /* Find first delta in this layer. */
        double const *d = ann->delta + (h * ann->hidden);

        /* Find first input to this layer. */
        double const *i = ann->output + (h
                ? (ann->inputs + ann->hidden * (h-1))
                : 0);

        /* Find first weight to this layer. */
        double *w = ann->weight + (h
                ? ((ann->inputs+1) * ann->hidden + (ann->hidden+1) * (ann->hidden) * (h-1))
                : 0);

        for (j = 0; j < ann->hidden; ++j) {
            for (k = 0; k < (h == 0 ? ann->inputs : ann->hidden) + 1; ++k) {
                if (k == 0) {
                    *w++ += *d * learning_rate * -1.0; /* Bias weight. */
                } else {
                    *w++ += *d * learning_rate * i[k-1];
                }
            }
            ++d;
        }

    }

}
|
||||||
|
|
||||||
|
|
||||||
|
void genann_write(GENANN const *ann, FILE *out) {
    /* Persist the network as text: four topology integers, then every
     * weight with enough precision (%.20e) for genann_read to restore
     * it bit-for-bit as a double. */
    int i; /* Hoisted above statements: the Makefile builds with -ansi
            * (C90), which forbids declarations after statements. */

    fprintf(out, "%d %d %d %d", ann->inputs, ann->hidden_layers, ann->hidden, ann->outputs);

    for (i = 0; i < ann->total_weights; ++i) {
        fprintf(out, " %.20e", ann->weight[i]);
    }
}
|
||||||
|
|
||||||
|
|
103
genann.h
Normal file
103
genann.h
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
/*
|
||||||
|
* GENANN - Minimal C Artificial Neural Network
|
||||||
|
*
|
||||||
|
* Copyright (c) 2015, 2016 Lewis Van Winkle
|
||||||
|
*
|
||||||
|
* http://CodePlea.com
|
||||||
|
*
|
||||||
|
* This software is provided 'as-is', without any express or implied
|
||||||
|
* warranty. In no event will the authors be held liable for any damages
|
||||||
|
* arising from the use of this software.
|
||||||
|
*
|
||||||
|
* Permission is granted to anyone to use this software for any purpose,
|
||||||
|
* including commercial applications, and to alter it and redistribute it
|
||||||
|
* freely, subject to the following restrictions:
|
||||||
|
*
|
||||||
|
* 1. The origin of this software must not be misrepresented; you must not
|
||||||
|
* claim that you wrote the original software. If you use this software
|
||||||
|
* in a product, an acknowledgement in the product documentation would be
|
||||||
|
* appreciated but is not required.
|
||||||
|
* 2. Altered source versions must be plainly marked as such, and must not be
|
||||||
|
* misrepresented as being the original software.
|
||||||
|
* 3. This notice may not be removed or altered from any source distribution.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef __GENANN_H__
|
||||||
|
#define __GENANN_H__
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef GENANN_RANDOM
|
||||||
|
/* We use the following for uniform random numbers between 0 and 1.
|
||||||
|
* If you have a better function, redefine this macro. */
|
||||||
|
#define GENANN_RANDOM() (((double)rand())/RAND_MAX)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
typedef double (*GENANN_ACTFUN)(double a);
|
||||||
|
|
||||||
|
|
||||||
|
typedef struct GENANN {
    /* A complete network.  The struct and the weight/output/delta arrays
     * are allocated as one contiguous block (see genann_init), so the
     * whole thing is released with a single genann_free(). */

    /* How many inputs, outputs, and hidden neurons. */
    int inputs, hidden_layers, hidden, outputs;

    /* Which activation function to use for hidden neurons. Default: genann_act_sigmoid_cached*/
    GENANN_ACTFUN activation_hidden;

    /* Which activation function to use for output. Default: genann_act_sigmoid_cached*/
    GENANN_ACTFUN activation_output;

    /* Total number of weights, and size of weights buffer. */
    int total_weights;

    /* Total number of neurons + inputs and size of output buffer. */
    int total_neurons;

    /* All weights (total_weights long). */
    double *weight;

    /* Stores input array and output of each neuron (total_neurons long). */
    double *output;

    /* Stores delta of each hidden and output neuron (total_neurons - inputs long). */
    double *delta;

} GENANN;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/* Creates and returns a new ann. */
|
||||||
|
GENANN *genann_init(int inputs, int hidden_layers, int hidden, int outputs);
|
||||||
|
|
||||||
|
/* Creates ANN from file saved with genann_write. */
|
||||||
|
GENANN *genann_read(FILE *in);
|
||||||
|
|
||||||
|
/* Sets weights randomly. Called by init. */
|
||||||
|
void genann_randomize(GENANN *ann);
|
||||||
|
|
||||||
|
/* Returns a new copy of ann. */
|
||||||
|
GENANN *genann_copy(GENANN const *ann);
|
||||||
|
|
||||||
|
/* Frees the memory used by an ann. */
|
||||||
|
void genann_free(GENANN *ann);
|
||||||
|
|
||||||
|
/* Runs the feedforward algorithm to calculate the ann's output. */
|
||||||
|
double const *genann_run(GENANN const *ann, double const *inputs);
|
||||||
|
|
||||||
|
/* Does a single backprop update. */
|
||||||
|
void genann_train(GENANN const *ann, double const *inputs, double const *desired_outputs, double learning_rate);
|
||||||
|
|
||||||
|
/* Saves the ann. */
|
||||||
|
void genann_write(GENANN const *ann, FILE *out);
|
||||||
|
|
||||||
|
|
||||||
|
double genann_act_sigmoid(double a);
|
||||||
|
double genann_act_sigmoid_cached(double a);
|
||||||
|
double genann_act_threshold(double a);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#endif /*__GENANN_H__*/
|
127
minctest.h
Normal file
127
minctest.h
Normal file
@ -0,0 +1,127 @@
|
|||||||
|
/*
|
||||||
|
*
|
||||||
|
* MINCTEST - Minimal C Test Library - 0.1
|
||||||
|
*
|
||||||
|
* Copyright (c) 2014, 2015, 2016 Lewis Van Winkle
|
||||||
|
*
|
||||||
|
* http://CodePlea.com
|
||||||
|
*
|
||||||
|
* This software is provided 'as-is', without any express or implied
|
||||||
|
* warranty. In no event will the authors be held liable for any damages
|
||||||
|
* arising from the use of this software.
|
||||||
|
*
|
||||||
|
* Permission is granted to anyone to use this software for any purpose,
|
||||||
|
* including commercial applications, and to alter it and redistribute it
|
||||||
|
* freely, subject to the following restrictions:
|
||||||
|
*
|
||||||
|
* 1. The origin of this software must not be misrepresented; you must not
|
||||||
|
* claim that you wrote the original software. If you use this software
|
||||||
|
* in a product, an acknowledgement in the product documentation would be
|
||||||
|
* appreciated but is not required.
|
||||||
|
* 2. Altered source versions must be plainly marked as such, and must not be
|
||||||
|
* misrepresented as being the original software.
|
||||||
|
* 3. This notice may not be removed or altered from any source distribution.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* MINCTEST - Minimal testing library for C
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* Example:
|
||||||
|
*
|
||||||
|
* void test1() {
|
||||||
|
* lok('a' == 'a');
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* void test2() {
|
||||||
|
* lequal(5, 6);
|
||||||
|
* lfequal(5.5, 5.6);
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
* int main() {
|
||||||
|
* lrun("test1", test1);
|
||||||
|
* lrun("test2", test2);
|
||||||
|
* lresults();
|
||||||
|
* return lfails != 0;
|
||||||
|
* }
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* Hints:
|
||||||
|
* All functions/variables start with the letter 'l'.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef __MINCTEST_H__
|
||||||
|
#define __MINCTEST_H__
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
|
||||||
|
/* How far apart can floats be before we consider them unequal. */
|
||||||
|
#define LTEST_FLOAT_TOLERANCE 0.001
|
||||||
|
|
||||||
|
|
||||||
|
/* Track the number of passes, fails. */
|
||||||
|
/* NB this is made for all tests to be in one file. */
|
||||||
|
static int ltests = 0;
|
||||||
|
static int lfails = 0;
|
||||||
|
|
||||||
|
|
||||||
|
/* Display the test results. */
|
||||||
|
/* Print the overall pass/fail summary; call once after all lrun()s. */
#define lresults() do {\
    if (lfails == 0) {\
        printf("ALL TESTS PASSED (%d/%d)\n", ltests, ltests);\
    } else {\
        printf("SOME TESTS FAILED (%d/%d)\n", ltests-lfails, ltests);\
    }\
} while (0)
|
||||||
|
|
||||||
|
|
||||||
|
/* Run a test. Name can be any string to print out, test is the function name to call. */
|
||||||
|
/* Run one test function, printing its name, the pass/fail counts it
 * contributed, and its wall-clock duration in milliseconds. */
#define lrun(name, test) do {\
    const int ts = ltests;\
    const int fs = lfails;\
    const clock_t start = clock();\
    printf("\t%-14s", name);\
    test();\
    printf("pass:%2d fail:%2d %4dms\n",\
            (ltests-ts)-(lfails-fs), lfails-fs,\
            (int)((clock() - start) * 1000 / CLOCKS_PER_SEC));\
} while (0)
|
||||||
|
|
||||||
|
|
||||||
|
/* Assert a true statement. */
|
||||||
|
/* Assert that `test` is truthy; on failure prints file:line and counts
 * the failure, but execution continues. */
#define lok(test) do {\
    ++ltests;\
    if (!(test)) {\
        ++lfails;\
        printf("%s:%d error \n", __FILE__, __LINE__);\
    }} while (0)
|
||||||
|
|
||||||
|
|
||||||
|
/* Assert two integers are equal. */
|
||||||
|
/* Assert two ints are equal; both operands are printed on failure.
 * NOTE: arguments are evaluated twice on the failure path. */
#define lequal(a, b) do {\
    ++ltests;\
    if ((a) != (b)) {\
        ++lfails;\
        printf("%s:%d (%d != %d)\n", __FILE__, __LINE__, (a), (b));\
    }} while (0)
|
||||||
|
|
||||||
|
|
||||||
|
/* Assert two floats are equal (Within LTEST_FLOAT_TOLERANCE). */
|
||||||
|
/* Assert two floating-point values are within LTEST_FLOAT_TOLERANCE of
 * each other.  NOTE: arguments are evaluated twice on the failure path. */
#define lfequal(a, b) do {\
    ++ltests;\
    if (fabs((double)(a)-(double)(b)) > LTEST_FLOAT_TOLERANCE) {\
        ++lfails;\
        printf("%s:%d (%f != %f)\n", __FILE__, __LINE__, (double)(a), (double)(b));\
    }} while (0)
|
||||||
|
|
||||||
|
|
||||||
|
#endif /*__MINCTEST_H__*/
|
276
test.c
Normal file
276
test.c
Normal file
@ -0,0 +1,276 @@
|
|||||||
|
/*
|
||||||
|
* GENANN - Minimal C Artificial Neural Network
|
||||||
|
*
|
||||||
|
* Copyright (c) 2015, 2016 Lewis Van Winkle
|
||||||
|
*
|
||||||
|
* http://CodePlea.com
|
||||||
|
*
|
||||||
|
* This software is provided 'as-is', without any express or implied
|
||||||
|
* warranty. In no event will the authors be held liable for any damages
|
||||||
|
* arising from the use of this software.
|
||||||
|
*
|
||||||
|
* Permission is granted to anyone to use this software for any purpose,
|
||||||
|
* including commercial applications, and to alter it and redistribute it
|
||||||
|
* freely, subject to the following restrictions:
|
||||||
|
*
|
||||||
|
* 1. The origin of this software must not be misrepresented; you must not
|
||||||
|
* claim that you wrote the original software. If you use this software
|
||||||
|
* in a product, an acknowledgement in the product documentation would be
|
||||||
|
* appreciated but is not required.
|
||||||
|
* 2. Altered source versions must be plainly marked as such, and must not be
|
||||||
|
* misrepresented as being the original software.
|
||||||
|
* 3. This notice may not be removed or altered from any source distribution.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "genann.h"
|
||||||
|
#include "minctest.h"
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <math.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
void basic() {
    /* Single input, no hidden layer, single sigmoid output:
     * out = sigmoid(w0 * -1 + w1 * in).  w0 is the bias weight. */
    GENANN *ann = genann_init(1, 0, 0, 1);

    lequal(ann->total_weights, 2); /* One bias weight + one input weight. */
    double a;

    /* All-zero weights: the net input is 0 regardless of a, so sigmoid
     * always yields exactly 0.5. */
    a = 0;
    ann->weight[0] = 0;
    ann->weight[1] = 0;
    lfequal(0.5, *genann_run(ann, &a));

    a = 1;
    lfequal(0.5, *genann_run(ann, &a));

    a = 11;
    lfequal(0.5, *genann_run(ann, &a));

    /* With bias == input weight == 1, input 1 gives net 0 -> 0.5. */
    a = 1;
    ann->weight[0] = 1;
    ann->weight[1] = 1;
    lfequal(0.5, *genann_run(ann, &a));

    /* Large positive/negative net inputs saturate the sigmoid. */
    a = 10;
    ann->weight[0] = 1;
    ann->weight[1] = 1;
    lfequal(1.0, *genann_run(ann, &a));

    a = -10;
    lfequal(0.0, *genann_run(ann, &a));

    genann_free(ann);
}
|
||||||
|
|
||||||
|
|
||||||
|
void xor() {
    /* Hand-wire a 2-2-1 threshold network that computes XOR, then check
     * all four truth-table rows.  Weight layout per neuron is
     * [bias, in0, in1]; the bias input is the constant -1. */
    GENANN *ann = genann_init(2, 1, 2, 1);
    ann->activation_hidden = genann_act_threshold;
    ann->activation_output = genann_act_threshold;

    lequal(ann->total_weights, 9); /* 2 hidden * 3 + 1 output * 3. */

    /* First hidden: fires when in0 + in1 > .5, i.e. OR. */
    ann->weight[0] = .5;
    ann->weight[1] = 1;
    ann->weight[2] = 1;

    /* Second hidden: fires when in0 + in1 > 1, i.e. AND. */
    ann->weight[3] = 1;
    ann->weight[4] = 1;
    ann->weight[5] = 1;

    /* Output: OR and not AND == XOR. */
    ann->weight[6] = .5;
    ann->weight[7] = 1;
    ann->weight[8] = -1;

    double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    double output[4] = {0, 1, 1, 0};

    lfequal(output[0], *genann_run(ann, input[0]));
    lfequal(output[1], *genann_run(ann, input[1]));
    lfequal(output[2], *genann_run(ann, input[2]));
    lfequal(output[3], *genann_run(ann, input[3]));

    genann_free(ann);
}
|
||||||
|
|
||||||
|
|
||||||
|
void backprop() {
    /* A single training step should move the network's output strictly
     * closer to the target. */
    GENANN *ann = genann_init(1, 0, 0, 1);

    double input, output;
    input = .5;
    output = 1;

    double first_try = *genann_run(ann, &input);
    genann_train(ann, &input, &output, .5);
    double second_try = *genann_run(ann, &input);
    lok(fabs(first_try - output) > fabs(second_try - output));

    genann_free(ann);
}
|
||||||
|
|
||||||
|
|
||||||
|
void train_and() {
|
||||||
|
double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
|
||||||
|
double output[4] = {0, 0, 0, 1};
|
||||||
|
|
||||||
|
GENANN *ann = genann_init(2, 0, 0, 1);
|
||||||
|
|
||||||
|
int i, j;
|
||||||
|
|
||||||
|
for (i = 0; i < 50; ++i) {
|
||||||
|
for (j = 0; j < 4; ++j) {
|
||||||
|
genann_train(ann, input[j], output + j, .8);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ann->activation_output = genann_act_threshold;
|
||||||
|
lfequal(output[0], *genann_run(ann, input[0]));
|
||||||
|
lfequal(output[1], *genann_run(ann, input[1]));
|
||||||
|
lfequal(output[2], *genann_run(ann, input[2]));
|
||||||
|
lfequal(output[3], *genann_run(ann, input[3]));
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void train_or() {
|
||||||
|
double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
|
||||||
|
double output[4] = {0, 1, 1, 1};
|
||||||
|
|
||||||
|
GENANN *ann = genann_init(2, 0, 0, 1);
|
||||||
|
genann_randomize(ann);
|
||||||
|
|
||||||
|
int i, j;
|
||||||
|
|
||||||
|
for (i = 0; i < 50; ++i) {
|
||||||
|
for (j = 0; j < 4; ++j) {
|
||||||
|
genann_train(ann, input[j], output + j, .8);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ann->activation_output = genann_act_threshold;
|
||||||
|
lfequal(output[0], *genann_run(ann, input[0]));
|
||||||
|
lfequal(output[1], *genann_run(ann, input[1]));
|
||||||
|
lfequal(output[2], *genann_run(ann, input[2]));
|
||||||
|
lfequal(output[3], *genann_run(ann, input[3]));
|
||||||
|
|
||||||
|
genann_free(ann);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
void train_xor() {
    /* Train a 2-2-1 network on XOR (needs the hidden layer: XOR is not
     * linearly separable), then verify thresholded outputs. */
    double input[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    double output[4] = {0, 1, 1, 0};

    GENANN *ann = genann_init(2, 1, 2, 1);

    int i, j;

    /* 300 epochs with an aggressive learning rate of 3. */
    for (i = 0; i < 300; ++i) {
        for (j = 0; j < 4; ++j) {
            genann_train(ann, input[j], output + j, 3);
        }
        /* printf("%1.2f ", xor_score(ann)); */
    }

    ann->activation_output = genann_act_threshold;
    lfequal(output[0], *genann_run(ann, input[0]));
    lfequal(output[1], *genann_run(ann, input[1]));
    lfequal(output[2], *genann_run(ann, input[2]));
    lfequal(output[3], *genann_run(ann, input[3]));

    genann_free(ann);
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
void persist() {
|
||||||
|
GENANN *first = genann_init(1000, 5, 50, 10);
|
||||||
|
|
||||||
|
FILE *out = fopen("persist.txt", "w");
|
||||||
|
genann_write(first, out);
|
||||||
|
fclose(out);
|
||||||
|
|
||||||
|
|
||||||
|
FILE *in = fopen("persist.txt", "r");
|
||||||
|
GENANN *second = genann_read(in);
|
||||||
|
fclose(out);
|
||||||
|
|
||||||
|
lequal(first->inputs, second->inputs);
|
||||||
|
lequal(first->hidden_layers, second->hidden_layers);
|
||||||
|
lequal(first->hidden, second->hidden);
|
||||||
|
lequal(first->outputs, second->outputs);
|
||||||
|
lequal(first->total_weights, second->total_weights);
|
||||||
|
|
||||||
|
int i;
|
||||||
|
for (i = 0; i < first->total_weights; ++i) {
|
||||||
|
lok(first->weight[i] == second->weight[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
genann_free(first);
|
||||||
|
genann_free(second);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void copy() {
    /* genann_copy must produce an independent network with identical
     * topology and weights. */
    GENANN *first = genann_init(1000, 5, 50, 10);

    GENANN *second = genann_copy(first);

    lequal(first->inputs, second->inputs);
    lequal(first->hidden_layers, second->hidden_layers);
    lequal(first->hidden, second->hidden);
    lequal(first->outputs, second->outputs);
    lequal(first->total_weights, second->total_weights);

    int i;
    for (i = 0; i < first->total_weights; ++i) {
        lfequal(first->weight[i], second->weight[i]);
    }

    genann_free(first);
    genann_free(second);
}
|
||||||
|
|
||||||
|
|
||||||
|
void sigmoid() {
    /* Sweep [-20, 20) in steps of 1e-4 and check the cached lookup-table
     * sigmoid stays within LTEST_FLOAT_TOLERANCE of the exact one,
     * including the clamped region outside the table's [-15, 15] range. */
    double i = -20;
    const double max = 20;
    const double d = .0001;

    while (i < max) {
        lfequal(genann_act_sigmoid(i), genann_act_sigmoid_cached(i));
        i += d;
    }
}
|
||||||
|
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
printf("GENANN TEST SUITE\n");
|
||||||
|
|
||||||
|
srand(100);
|
||||||
|
|
||||||
|
lrun("basic", basic);
|
||||||
|
lrun("xor", xor);
|
||||||
|
lrun("backprop", backprop);
|
||||||
|
lrun("train and", train_and);
|
||||||
|
lrun("train or", train_or);
|
||||||
|
lrun("train xor", train_xor);
|
||||||
|
lrun("persist", persist);
|
||||||
|
lrun("copy", copy);
|
||||||
|
lrun("sigmoid", sigmoid);
|
||||||
|
|
||||||
|
lresults();
|
||||||
|
|
||||||
|
return lfails != 0;
|
||||||
|
}
|
Loading…
Reference in New Issue
Block a user