diff --git a/DIRECTORY.md b/DIRECTORY.md index 24213992..3db8eab4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -29,6 +29,7 @@ * [Create Node](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/binary_trees/create_node.c) * [Recursive Traversals](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/binary_trees/recursive_traversals.c) * [Redblacktree](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/binary_trees/redblacktree.c) + * [Threaded Binary Trees](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/binary_trees/threaded_binary_trees.c) * Dictionary * [Dict](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/dictionary/dict.c) * [Dict](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/data_structures/dictionary/dict.h) @@ -203,6 +204,7 @@ ## Machine Learning * [Adaline Learning](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/adaline_learning.c) + * [Kohonen Som Image](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/kohonen_som_image.c) * [Kohonen Som Trace](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/kohonen_som_trace.c) ## Misc diff --git a/data_structures/binary_trees/threaded_binary_trees.c b/data_structures/binary_trees/threaded_binary_trees.c new file mode 100644 index 00000000..49accb08 --- /dev/null +++ b/data_structures/binary_trees/threaded_binary_trees.c @@ -0,0 +1,247 @@ +/** + * @file + * \brief This file is a simple implementation of a Threaded Binary Tree + * + * Threaded Binary Tree is a binary tree variant in which all left child + * pointers that are NULL (in Linked list representation) point to its + * in-order predecessor, and all right child pointers that are NULL + * (in Linked list representation) point to its in-order successor. 
#include <stdio.h>
#include <stdlib.h>

/**
 * Node, the basic data structure of the tree.
 *
 * NOTE(review): despite the file name, llink/rlink are plain child pointers;
 * no in-order threads are actually stored by this implementation.
 */
typedef struct Node {
    int data;           /**< stores the number */
    struct Node *llink; /**< link to left child */
    struct Node *rlink; /**< link to right child */
} node;

/**
 * Creates a new node.
 * \param[in] data value to be stored in the node
 * \returns a pointer to the new node; terminates the program on allocation
 * failure, so the return value is never NULL
 */
node *create_node(int data) {
    node *ptr = (node *)malloc(sizeof(node));
    if (ptr == NULL) {  // malloc may fail; never dereference a NULL result
        fprintf(stderr, "create_node: out of memory\n");
        exit(EXIT_FAILURE);
    }
    ptr->rlink = ptr->llink = NULL;
    ptr->data = data;
    return ptr;
}

/**
 * Inserts a value into the binary search tree.
 * Duplicate values are silently ignored.
 * \param[in,out] root pointer to node pointer to the topmost node of the tree
 * \param[in] data value to be inserted into the tree
 */
void insert_bt(node **root, int data) {
    node *new_node = create_node(data);
    node *temp;  // walks down the tree looking for the insertion point
    node *prev;  // parent of temp
    if (*root == NULL) {
        *root = new_node;
        return;
    }
    temp = *root;
    prev = NULL;
    while (temp != NULL) {
        if (new_node->data > temp->data) {
            prev = temp;
            temp = temp->rlink;
        } else if (new_node->data < temp->data) {
            prev = temp;
            temp = temp->llink;
        } else {
            free(new_node);  // value already present: free it to avoid a leak
            return;
        }
    }

    if (new_node->data > prev->data) {
        prev->rlink = new_node;
    } else {
        prev->llink = new_node;
    }
}

/**
 * Searches for an element and prints whether it was found.
 * \param[in] root node pointer to the topmost node of the tree
 * \param[in] ele value searched for
 */
void search(node *root, int ele) {
    node *temp = root;
    while (temp != NULL) {
        if (temp->data == ele) {
            break;
        } else if (ele > temp->data) {
            temp = temp->rlink;
        } else {
            temp = temp->llink;
        }
    }

    if (temp == NULL) {
        printf("%s\n", "Element not found.");
    } else {
        printf("%s\n", "Element found.");
    }
}

/**
 * Performs inorder traversal, printing each key.
 * \param[in] curr node pointer to the topmost node of the tree
 */
void inorder_display(node *curr) {
    if (curr != NULL) {
        inorder_display(curr->llink);
        printf("%d\t", curr->data);
        inorder_display(curr->rlink);
    }
}

/**
 * Performs postorder traversal, printing each key.
 * \param[in] curr node pointer to the topmost node of the tree
 */
void postorder_display(node *curr) {
    if (curr != NULL) {
        postorder_display(curr->llink);
        postorder_display(curr->rlink);
        printf("%d\t", curr->data);
    }
}

/**
 * Performs preorder traversal, printing each key.
 * \param[in] curr node pointer to the topmost node of the tree
 */
void preorder_display(node *curr) {
    if (curr != NULL) {
        printf("%d\t", curr->data);
        preorder_display(curr->llink);
        preorder_display(curr->rlink);
    }
}

/**
 * Deletes a node from the tree; takes no action if the value is absent.
 * A node with two children is replaced by its right subtree, and the deleted
 * node's left subtree is re-attached under the leftmost node of that right
 * subtree, preserving the BST ordering.
 * \param[in,out] root pointer to node pointer to the topmost node of the tree
 * \param[in] ele value to be deleted from the tree
 */
void delete_bt(node **root, int ele) {
    node *temp;
    node *prev;
    if (*root == NULL)
        return;

    temp = *root;
    prev = NULL;
    // locate the node and remember its parent
    while (temp != NULL) {
        if (temp->data == ele) {
            break;
        } else if (ele > temp->data) {
            prev = temp;
            temp = temp->rlink;
        } else {
            prev = temp;
            temp = temp->llink;
        }
    }

    if (temp == NULL)  // value not present
        return;

    node *replacement;  // deleted node's replacement
    if (temp->llink == NULL && temp->rlink == NULL) {
        replacement = NULL;  // leaf: nothing takes its place
    } else if (temp->llink == NULL) {
        replacement = temp->rlink;  // only a right child
    } else if (temp->rlink == NULL) {
        replacement = temp->llink;  // only a left child
    } else {
        replacement = temp->rlink;  // replaced with right subtree
        node *t = replacement;
        while (t->llink != NULL) {
            t = t->llink;
        }
        t->llink = temp->llink;  // leftmost node of the replacement is linked
                                 // to the left child of the deleted node
    }

    if (temp == *root) {
        free(*root);
        *root = replacement;
    } else if (prev->llink == temp) {
        free(prev->llink);
        prev->llink = replacement;
    } else {
        free(prev->rlink);
        prev->rlink = replacement;
    }
}
+ + if (temp == *root) { + free(*root); + *root = replacement; + } else if (prev->llink == temp) { + free(prev->llink); + prev->llink = replacement; + } else if (prev->rlink == temp) { + free(prev->rlink); + prev->rlink = replacement; + } + } +} + +/** + * main function + */ +int main() { + printf("BINARY THREADED TREE: \n"); + node *root = NULL; + int choice, n; + do { + printf("%s\n", "1. Insert into BT"); + printf("%s\n", "2. Print BT - inorder"); + printf("%s\n", "3. Print BT - preorder"); + printf("%s\n", "4. print BT - postorder"); + printf("%s\n", "5. delete from BT"); + printf("%s\n", "6. search in BT"); + printf("%s\n", "Type 0 to exit"); + scanf("%d", &choice); + + switch (choice) { + case 1: + printf("%s\n", "Enter a no:"); + scanf("%d", &n); + insert_bt(&root, n); + break; + case 2: + inorder_display(root); + printf("\n"); + break; + case 3: + preorder_display(root); + printf("\n"); + break; + case 4: + postorder_display(root); + printf("\n"); + break; + case 5: + printf("%s\n", "Enter a no:"); + scanf("%d", &n); + delete_bt(&root, n); + break; + case 6: + printf("%s\n", "Enter a no:"); + scanf("%d", &n); + search(root, n); + break; + } + } while (choice != 0); + return 0; +} diff --git a/machine_learning/kohonen_som_image.c b/machine_learning/kohonen_som_image.c new file mode 100644 index 00000000..6e039e38 --- /dev/null +++ b/machine_learning/kohonen_som_image.c @@ -0,0 +1,700 @@ +/** + * \file + * \brief [Kohonen self organizing + * map](https://en.wikipedia.org/wiki/Self-organizing_map) (topological map) + * + * \author [Krishna Vedala](https://github.com/kvedala) + * + * This example implements a powerful unsupervised learning algorithm called as + * a self organizing map. The algorithm creates a connected network of weights + * that closely follows the given data points. 
/**
 * \file
 * \brief [Kohonen self organizing
 * map](https://en.wikipedia.org/wiki/Self-organizing_map) (topological map)
 *
 * This example implements an unsupervised learning algorithm called a
 * self organizing map: a connected network of weights that closely follows
 * the given data points, creating a topological map that preserves the
 * relationship between various points of a higher dimensional space in an
 * equivalent 2-dimensional map.
 * \author [Krishna Vedala](https://github.com/kvedala)
 */
#define _USE_MATH_DEFINES  // required for MS Visual C
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP  // check if OpenMP based parallellization is available
#include <omp.h>
#endif

// arguments parenthesized so the macros stay safe with expressions
#define max(a, b) ((a) > (b) ? (a) : (b))  // shorthand for maximum value
#define min(a, b) ((a) < (b) ? (a) : (b))  // shorthand for minimum value

/** to store info regarding 3D arrays */
struct array_3d
{
    int dim1, dim2, dim3; /**< lengths of each dimension */
    double *data;         /**< pointer to data */
};

/** Function that returns the pointer to (x, y, z) ^th location in the
 * linear 3D array given by:
 * \f[
 * X_{i,j,k} = i\times M\times N + j\times N + k
 * \f]
 * where \f$L\f$, \f$M\f$ and \f$N\f$ are the 3D matrix dimensions.
 * \param[in] arr pointer to ::array_3d structure
 * \param[in] x first index
 * \param[in] y second index
 * \param[in] z third index
 * \returns pointer to (x,y,z)^th location of data
 */
double *data_3d(const struct array_3d *arr, int x, int y, int z)
{
    int offset = (x * arr->dim2 * arr->dim3) + (y * arr->dim3) + z;
    return arr->data + offset;
}

/**
 * Helper function to generate a random number in a given interval.
 * \n Steps:
 * 1. `r1 = rand() % 100` gets a random number between 0 and 99
 * 2. `r2 = r1 / 100` converts random number to be between 0 and 0.99
 * 3. scale and offset the random number to given range of \f$[a,b)\f$
 *
 * NOTE(review): file-scope identifiers beginning with an underscore are
 * reserved for the implementation; the name is kept because the rest of the
 * file calls it.
 * \param[in] a lower limit
 * \param[in] b upper limit
 * \returns random number in the range \f$[a,b)\f$
 */
double _random(double a, double b)
{
    return ((b - a) * (rand() % 100) / 100.f) + a;
}

/**
 * Save a given n-dimensional data matrix to file.
 *
 * \param[in] fname filename to save in (gets overwritten without confirmation)
 * \param[in] X matrix to save
 * \param[in] num_points rows in the matrix = number of points
 * \param[in] num_features columns in the matrix = dimensions of points
 * \returns 0 if all ok
 * \returns -1 if file creation failed
 */
int save_2d_data(const char *fname, double **X, int num_points,
                 int num_features)
{
    FILE *fp = fopen(fname, "wt");
    if (!fp)  // error with fopen
    {
        char msg[120];
        // snprintf: a long fname must not overflow the fixed-size buffer
        snprintf(msg, sizeof(msg), "File error (%s): ", fname);
        perror(msg);
        return -1;
    }

    for (int i = 0; i < num_points; i++)  // for each point in the array
    {
        for (int j = 0; j < num_features; j++)  // for each feature
        {
            fprintf(fp, "%.4g", X[i][j]);  // print the feature value
            if (j < num_features - 1)      // if not the last feature
                fputc(',', fp);            // suffix comma
        }
        if (i < num_points - 1)  // if not the last row
            fputc('\n', fp);     // start a new line
    }
    fclose(fp);
    return 0;
}
+ * + * \param[in] fname filename to save in (gets overwriten without confirmation) + * \param[in] W model matrix to save + * \returns 0 if all ok + * \returns -1 if file creation failed + */ +int save_u_matrix(const char *fname, struct array_3d *W) +{ + FILE *fp = fopen(fname, "wt"); + if (!fp) // error with fopen + { + char msg[120]; + sprintf(msg, "File error (%s): ", fname); + perror(msg); + return -1; + } + + int R = max(W->dim1 >> 3, 2); /* neighborhood range */ + + for (int i = 0; i < W->dim1; i++) // for each x + { + for (int j = 0; j < W->dim2; j++) // for each y + { + double distance = 0.f; + int k; + + int from_x = max(0, i - R); + int to_x = min(W->dim1, i + R + 1); + int from_y = max(0, j - R); + int to_y = min(W->dim2, j + R + 1); + int l; +#ifdef _OPENMP +#pragma omp parallel for reduction(+ : distance) +#endif + for (l = from_x; l < to_x; l++) + { + for (int m = from_y; m < to_y; m++) + { + double d = 0.f; + for (k = 0; k < W->dim3; k++) // for each feature + { + double *w1 = data_3d(W, i, j, k); + double *w2 = data_3d(W, l, m, k); + d += (w1[0] - w2[0]) * (w1[0] - w2[0]); + // distance += w1[0] * w1[0]; + } + distance += sqrt(d); + // distance += d; + } + } + + distance /= R * R; // mean disntance from neighbors + fprintf(fp, "%.4g", distance); // print the mean separation + if (j < W->dim2 - 1) // if not the last column + fputc(',', fp); // suffix comma + } + if (i < W->dim1 - 1) // if not the last row + fputc('\n', fp); // start a new line + } + fclose(fp); + return 0; +} + +/** + * Get minimum value and index of the value in a matrix + * \param[in] X matrix to search + * \param[in] N number of points in the vector + * \param[out] val minimum value found + * \param[out] idx index where minimum value was found + */ +void get_min_2d(double **X, int N, double *val, int *x_idx, int *y_idx) +{ + val[0] = INFINITY; // initial min value + + for (int i = 0; i < N; i++) // traverse each x-index + { + for (int j = 0; j < N; j++) // traverse each y-index + 
{ + if (X[i][j] < val[0]) // if a lower value is found + { // save the value and its index + x_idx[0] = i; + y_idx[0] = j; + val[0] = X[i][j]; + } + } + } +} + +/** + * Update weights of the SOM using Kohonen algorithm + * + * \param[in] X data point + * \param[in,out] W weights matrix + * \param[in,out] D temporary vector to store distances + * \param[in] num_out number of output points + * \param[in] num_features number of features per input sample + * \param[in] alpha learning rate \f$0<\alpha\le1\f$ + * \param[in] R neighborhood range + */ +double update_weights(const double *X, struct array_3d *W, double **D, + int num_out, int num_features, double alpha, int R) +{ + int x, y, k; + double d_min = 0.f; + +#ifdef _OPENMP +#pragma omp for +#endif + // step 1: for each 2D output point + for (x = 0; x < num_out; x++) + { + for (y = 0; y < num_out; y++) + { + D[x][y] = 0.f; + // compute Euclidian distance of each output + // point from the current sample + for (k = 0; k < num_features; k++) + { + double *w = data_3d(W, x, y, k); + D[x][y] += (w[0] - X[k]) * (w[0] - X[k]); + } + D[x][y] = sqrt(D[x][y]); + } + } + + // step 2: get closest node i.e., node with smallest Euclidian distance to + // the current pattern + int d_min_x, d_min_y; + get_min_2d(D, num_out, &d_min, &d_min_x, &d_min_y); + + // step 3a: get the neighborhood range + int from_x = max(0, d_min_x - R); + int to_x = min(num_out, d_min_x + R + 1); + int from_y = max(0, d_min_y - R); + int to_y = min(num_out, d_min_y + R + 1); + + // step 3b: update the weights of nodes in the + // neighborhood +#ifdef _OPENMP +#pragma omp for +#endif + for (x = from_x; x < to_x; x++) + { + for (y = from_y; y < to_y; y++) + { + for (k = 0; k < num_features; k++) + { + // apply scaling inversely proportional to distance from the + // current node + double d2 = (d_min_x - x) * (d_min_x - x) + + (d_min_y - y) * (d_min_y - y); + double scale_factor = exp(-d2 * 0.5 / (alpha * alpha)); + + double *w = data_3d(W, x, y, k); + + 
// update weights of nodes in the neighborhood + w[0] += alpha * scale_factor * (X[k] - w[0]); + } + } + } + return d_min; +} + +/** + * Apply incremental algorithm with updating neighborhood and learning rates + * on all samples in the given datset. + * + * \param[in] X data set + * \param[in,out] W weights matrix + * \param[in] D temporary vector to store distances + * \param[in] num_samples number of output points + * \param[in] num_features number of features per input sample + * \param[in] num_out number of output points + * \param[in] alpha_min terminal value of alpha + */ +void kohonen_som(double **X, struct array_3d *W, int num_samples, + int num_features, int num_out, double alpha_min) +{ + int R = num_out >> 2, iter = 0; + double **D = (double **)malloc(num_out * sizeof(double *)); + for (int i = 0; i < num_out; i++) + D[i] = (double *)malloc(num_out * sizeof(double)); + + double dmin = 1.f; + // Loop alpha from 1 to slpha_min + for (double alpha = 1.f; alpha > alpha_min && dmin > 1e-9; + alpha -= 0.005, iter++) + { + dmin = 0.f; + // Loop for each sample pattern in the data set + for (int sample = 0; sample < num_samples; sample++) + { + const double *x = X[sample]; + // update weights for the current input pattern sample + dmin = update_weights(x, W, D, num_out, num_features, alpha, R); + } + + // every 20th iteration, reduce the neighborhood range + if (iter % 20 == 0 && R > 0) + R--; + + dmin /= num_samples; + printf("alpha: %.4g\t R: %d\td_min: %.4g\n", alpha, R, dmin); + } + + for (int i = 0; i < num_out; i++) + free(D[i]); + free(D); +} + +/** Creates a random set of points distributed in four clusters in + * 3D space with centroids at the points + * * \f$(0,5, 0.5, 0.5)\f$ + * * \f$(0,5,-0.5, -0.5)\f$ + * * \f$(-0,5, 0.5, 0.5)\f$ + * * \f$(-0,5,-0.5, -0.5)\f$ + * + * \param[out] data matrix to store data in + * \param[in] N number of points required + */ +void test_2d_classes(double *const *data, int N) +{ + const double R = 0.3; // radius of 
cluster + int i; + const int num_classes = 4; + const double centres[][2] = { + // centres of each class cluster + {.5, .5}, // centre of class 1 + {.5, -.5}, // centre of class 2 + {-.5, .5}, // centre of class 3 + {-.5, -.5} // centre of class 4 + }; + +#ifdef _OPENMP +#pragma omp for +#endif + for (i = 0; i < N; i++) + { + int class = rand() % num_classes; // select a random class for the point + + // create random coordinates (x,y,z) around the centre of the class + data[i][0] = _random(centres[class][0] - R, centres[class][0] + R); + data[i][1] = _random(centres[class][1] - R, centres[class][1] + R); + + /* The follosing can also be used + for (int j = 0; j < 2; j++) + data[i][j] = _random(centres[class][j] - R, centres[class][j] + R); + */ + } +} + +/** Test that creates a random set of points distributed in four clusters in + * 2D space and trains an SOM that finds the topological pattern. + * The following [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) + * files are created to validate the execution: + * * `test1.csv`: random test samples points with a circular pattern + * * `w11.csv`: initial random map + * * `w12.csv`: trained SOM map + * + * The outputs can be readily plotted in [gnuplot](https:://gnuplot.info) using + * the following snippet + * ```gnuplot + * set datafile separator ',' + * plot "test1.csv" title "original", \ + * "w11.csv" title "w1", \ + * "w12.csv" title "w2" + * ``` + * ![Sample execution + * output](https://raw.githubusercontent.com/kvedala/C/docs/images/machine_learning/kohonen/test1.svg) + */ +void test1() +{ + int j, N = 300; + int features = 2; + int num_out = 30; // image size - N x N + + // 2D space, hence size = number of rows * 2 + double **X = (double **)malloc(N * sizeof(double *)); + + // cluster nodex in 'x' * cluster nodes in 'y' * 2 + struct array_3d W; + W.dim1 = num_out; + W.dim2 = num_out; + W.dim3 = features; + W.data = (double *)malloc(num_out * num_out * features * + sizeof(double)); // assign rows 
+ + for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out) + { + if (i < N) // only add new arrays if i < N + X[i] = (double *)malloc(features * sizeof(double)); + if (i < num_out) // only add new arrays if i < num_out + { + for (int k = 0; k < num_out; k++) + { +#ifdef _OPENMP +#pragma omp for +#endif + // preallocate with random initial weights + for (j = 0; j < features; j++) + { + double *w = data_3d(&W, i, k, j); + w[0] = _random(-5, 5); + } + } + } + } + + test_2d_classes(X, N); // create test data around circumference of a circle + save_2d_data("test1.csv", X, N, features); // save test data points + save_u_matrix("w11.csv", &W); // save initial random weights + kohonen_som(X, &W, N, features, num_out, 1e-4); // train the SOM + save_u_matrix("w12.csv", &W); // save the resultant weights + + for (int i = 0; i < N; i++) + free(X[i]); + free(X); + free(W.data); +} + +/** Creates a random set of points distributed in four clusters in + * 3D space with centroids at the points + * * \f$(0,5, 0.5, 0.5)\f$ + * * \f$(0,5,-0.5, -0.5)\f$ + * * \f$(-0,5, 0.5, 0.5)\f$ + * * \f$(-0,5,-0.5, -0.5)\f$ + * + * \param[out] data matrix to store data in + * \param[in] N number of points required + */ +void test_3d_classes1(double *const *data, int N) +{ + const double R = 0.2; // radius of cluster + int i; + const int num_classes = 4; + const double centres[][3] = { + // centres of each class cluster + {.5, .5, .5}, // centre of class 1 + {.5, -.5, -.5}, // centre of class 2 + {-.5, .5, .5}, // centre of class 3 + {-.5, -.5 - .5} // centre of class 4 + }; + +#ifdef _OPENMP +#pragma omp for +#endif + for (i = 0; i < N; i++) + { + int class = rand() % num_classes; // select a random class for the point + + // create random coordinates (x,y,z) around the centre of the class + data[i][0] = _random(centres[class][0] - R, centres[class][0] + R); + data[i][1] = _random(centres[class][1] - R, centres[class][1] + R); + data[i][2] = _random(centres[class][2] - R, 
centres[class][2] + R); + + /* The follosing can also be used + for (int j = 0; j < 3; j++) + data[i][j] = _random(centres[class][j] - R, centres[class][j] + R); + */ + } +} + +/** Test that creates a random set of points distributed in 4 clusters in + * 3D space and trains an SOM that finds the topological pattern. The following + * [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) files are created + * to validate the execution: + * * `test2.csv`: random test samples points with a lamniscate pattern + * * `w21.csv`: initial random map + * * `w22.csv`: trained SOM map + * + * The outputs can be readily plotted in [gnuplot](https:://gnuplot.info) using + * the following snippet + * ```gnuplot + * set datafile separator ',' + * plot "test2.csv" title "original", \ + * "w21.csv" title "w1", \ + * "w22.csv" title "w2" + * ``` + * ![Sample execution + * output](https://raw.githubusercontent.com/kvedala/C/docs/images/machine_learning/kohonen/test2.svg) + */ +void test2() +{ + int j, N = 500; + int features = 3; + int num_out = 30; // image size - N x N + + // 3D space, hence size = number of rows * 3 + double **X = (double **)malloc(N * sizeof(double *)); + + // cluster nodex in 'x' * cluster nodes in 'y' * 2 + struct array_3d W; + W.dim1 = num_out; + W.dim2 = num_out; + W.dim3 = features; + W.data = (double *)malloc(num_out * num_out * features * + sizeof(double)); // assign rows + + for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out) + { + if (i < N) // only add new arrays if i < N + X[i] = (double *)malloc(features * sizeof(double)); + if (i < num_out) // only add new arrays if i < num_out + { + for (int k = 0; k < num_out; k++) + { +#ifdef _OPENMP +#pragma omp for +#endif + for (j = 0; j < features; j++) + { // preallocate with random initial weights + double *w = data_3d(&W, i, k, j); + w[0] = _random(-5, 5); + } + } + } + } + + test_3d_classes1(X, N); // create test data + save_2d_data("test2.csv", X, N, features); // save test data 
points + save_u_matrix("w21.csv", &W); // save initial random weights + kohonen_som(X, &W, N, features, num_out, 1e-4); // train the SOM + save_u_matrix("w22.csv", &W); // save the resultant weights + + for (int i = 0; i < N; i++) + free(X[i]); + free(X); + free(W.data); +} + +/** Creates a random set of points distributed in four clusters in + * 3D space with centroids at the points + * * \f$(0,5, 0.5, 0.5)\f$ + * * \f$(0,5,-0.5, -0.5)\f$ + * * \f$(-0,5, 0.5, 0.5)\f$ + * * \f$(-0,5,-0.5, -0.5)\f$ + * + * \param[out] data matrix to store data in + * \param[in] N number of points required + */ +void test_3d_classes2(double *const *data, int N) +{ + const double R = 0.2; // radius of cluster + int i; + const int num_classes = 8; + const double centres[][3] = { + // centres of each class cluster + {.5, .5, .5}, // centre of class 1 + {.5, .5, -.5}, // centre of class 2 + {.5, -.5, .5}, // centre of class 3 + {.5, -.5, -.5}, // centre of class 4 + {-.5, .5, .5}, // centre of class 5 + {-.5, .5, -.5}, // centre of class 6 + {-.5, -.5, .5}, // centre of class 7 + {-.5, -.5, -.5} // centre of class 8 + }; + +#ifdef _OPENMP +#pragma omp for +#endif + for (i = 0; i < N; i++) + { + int class = rand() % num_classes; // select a random class for the point + + // create random coordinates (x,y,z) around the centre of the class + data[i][0] = _random(centres[class][0] - R, centres[class][0] + R); + data[i][1] = _random(centres[class][1] - R, centres[class][1] + R); + data[i][2] = _random(centres[class][2] - R, centres[class][2] + R); + + /* The follosing can also be used + for (int j = 0; j < 3; j++) + data[i][j] = _random(centres[class][j] - R, centres[class][j] + R); + */ + } +} + +/** Test that creates a random set of points distributed in eight clusters in + * 3D space and trains an SOM that finds the topological pattern. 
The following + * [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) files are created + * to validate the execution: + * * `test3.csv`: random test samples points with a circular pattern + * * `w31.csv`: initial random map + * * `w32.csv`: trained SOM map + * + * The outputs can be readily plotted in [gnuplot](https:://gnuplot.info) using + * the following snippet + * ```gnuplot + * set datafile separator ',' + * plot "test3.csv" title "original", \ + * "w31.csv" title "w1", \ + * "w32.csv" title "w2" + * ``` + * ![Sample execution + * output](https://raw.githubusercontent.com/kvedala/C/docs/images/machine_learning/kohonen/test3.svg) + */ +void test3() +{ + int j, N = 500; + int features = 3; + int num_out = 30; + double **X = (double **)malloc(N * sizeof(double *)); + + // cluster nodex in 'x' * cluster nodes in 'y' * 2 + struct array_3d W; + W.dim1 = num_out; + W.dim2 = num_out; + W.dim3 = features; + W.data = (double *)malloc(num_out * num_out * features * + sizeof(double)); // assign rows + + for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out) + { + if (i < N) // only add new arrays if i < N + X[i] = (double *)malloc(features * sizeof(double)); + if (i < num_out) // only add new arrays if i < num_out + { + for (int k = 0; k < num_out; k++) + { +#ifdef _OPENMP +#pragma omp for +#endif + // preallocate with random initial weights + for (j = 0; j < features; j++) + { + double *w = data_3d(&W, i, k, j); + w[0] = _random(-5, 5); + } + } + } + } + + test_3d_classes2(X, N); // create test data around the lamniscate + save_2d_data("test3.csv", X, N, features); // save test data points + save_u_matrix("w31.csv", &W); // save initial random weights + kohonen_som(X, &W, N, features, num_out, 0.01); // train the SOM + save_u_matrix("w32.csv", &W); // save the resultant weights + + for (int i = 0; i < N; i++) + free(X[i]); + free(X); + free(W.data); +} + +/** + * Convert clock cycle difference to time in seconds + * + * \param[in] start_t start 
clock + * \param[in] end_t end clock + * \returns time difference in seconds + */ +double get_clock_diff(clock_t start_t, clock_t end_t) +{ + return (double)(end_t - start_t) / (double)CLOCKS_PER_SEC; +} + +/** Main function */ +int main(int argc, char **argv) +{ +#ifdef _OPENMP + printf("Using OpenMP based parallelization\n"); +#else + printf("NOT using OpenMP based parallelization\n"); +#endif + clock_t start_clk, end_clk; + + start_clk = clock(); + test1(); + end_clk = clock(); + printf("Test 1 completed in %.4g sec\n", + get_clock_diff(start_clk, end_clk)); + + start_clk = clock(); + test2(); + end_clk = clock(); + printf("Test 2 completed in %.4g sec\n", + get_clock_diff(start_clk, end_clk)); + + start_clk = clock(); + test3(); + end_clk = clock(); + printf("Test 3 completed in %.4g sec\n", + get_clock_diff(start_clk, end_clk)); + + printf("(Note: Calculated times include: creating test sets, training " + "model and writing files to disk.)\n\n"); + return 0; +} diff --git a/machine_learning/kohonen_som_trace.c b/machine_learning/kohonen_som_trace.c index 668dbde1..ec56f197 100644 --- a/machine_learning/kohonen_som_trace.c +++ b/machine_learning/kohonen_som_trace.c @@ -19,6 +19,9 @@ #include #endif +#define max(a, b) (a > b ? a : b) // shorthand for maximum value +#define min(a, b) (a < b ? a : b) // shorthand for minimum value + /** * Helper function to generate a random number in a given interval. * \n Steps: @@ -132,8 +135,8 @@ void update_weights(double const *x, double *const *W, double *D, int num_out, get_min_1d(D, num_out, &d_min, &d_min_idx); // step 3a: get the neighborhood range - int from_node = 0 > (d_min_idx - R) ? 0 : d_min_idx - R; - int to_node = num_out < (d_min_idx + R + 1) ? 
num_out : d_min_idx + R + 1; + int from_node = max(0, d_min_idx - R); + int to_node = min(num_out, d_min_idx + R + 1); // step 3b: update the weights of nodes in the // neighborhood @@ -240,10 +243,14 @@ void test1() int j, N = 500; int features = 2; int num_out = 50; + + // 2D space, hence size = number of rows * 2 double **X = (double **)malloc(N * sizeof(double *)); + + // number of clusters nodes * 2 double **W = (double **)malloc(num_out * sizeof(double *)); - for (int i = 0; i < (num_out > N ? num_out : N); - i++) // loop till max(N, num_out) + + for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out) { if (i < N) // only add new arrays if i < N X[i] = (double *)malloc(features * sizeof(double)); @@ -266,7 +273,7 @@ void test1() kohonen_som_tracer(X, W, N, features, num_out, 0.1); // train the SOM save_nd_data("w12.csv", W, num_out, features); // save the resultant weights - for (int i = 0; i < (num_out > N ? num_out : N); i++) + for (int i = 0; i < max(num_out, N); i++) { if (i < N) free(X[i]); @@ -335,7 +342,7 @@ void test2() int num_out = 20; double **X = (double **)malloc(N * sizeof(double *)); double **W = (double **)malloc(num_out * sizeof(double *)); - for (int i = 0; i < (num_out > N ? num_out : N); i++) + for (int i = 0; i < max(num_out, N); i++) { if (i < N) // only add new arrays if i < N X[i] = (double *)malloc(features * sizeof(double)); @@ -359,7 +366,7 @@ void test2() kohonen_som_tracer(X, W, N, features, num_out, 0.01); // train the SOM save_nd_data("w22.csv", W, num_out, features); // save the resultant weights - for (int i = 0; i < (num_out > N ? num_out : N); i++) + for (int i = 0; i < max(num_out, N); i++) { if (i < N) free(X[i]); @@ -438,7 +445,7 @@ void test3() int num_out = 20; double **X = (double **)malloc(N * sizeof(double *)); double **W = (double **)malloc(num_out * sizeof(double *)); - for (int i = 0; i < (num_out > N ? 
num_out : N); i++) + for (int i = 0; i < max(num_out, N); i++) { if (i < N) // only add new arrays if i < N X[i] = (double *)malloc(features * sizeof(double)); @@ -462,7 +469,7 @@ void test3() kohonen_som_tracer(X, W, N, features, num_out, 0.01); // train the SOM save_nd_data("w32.csv", W, num_out, features); // save the resultant weights - for (int i = 0; i < (num_out > N ? num_out : N); i++) + for (int i = 0; i < max(num_out, N); i++) { if (i < N) free(X[i]);