ae2f_docs
MlpTrainXOR-Primal.c
Go to the documentation of this file.
1#define ae2f_NEED_CLASS 0
2
3#include <ae2f/Ann/Act.h>
4#include <ae2f/Ann/Mlp.h>
5#include <ae2f/Ann/Slp.h>
6
7#include <assert.h>
8#include <stdio.h>
9#include <math.h>
10#include <stdlib.h>
11#include <time.h>
12
13static void Act(ae2f_float* r, ae2f_float_t x) {
14 *r = 1.0 / (1.0 + exp(-x));
15}
16
17static void ActDeriv(ae2f_float_t* r, ae2f_float_t output) {
18 *r = output * (1.0 - output);
19}
20
21static void LossDeriv(ae2f_float_t* r, const ae2f_float_t* output, const ae2f_float_t* target, size_t i, size_t c) {
22 *r = ((output[i] - target[i]) / c);
23}
24
25const ae2f_float_t inp[4][2] = {
26 {0, 0},
27 {0, 1},
28 {1, 0},
29 {1, 1}
30};
31
32const ae2f_float_t goal_xor[4] = {0, 1, 1, 0};
33
35
36#define MLP_DEPTH 3
37#define MLP_IN 2
38#define MLP_HID 3
39#define MLP_OUT 1
40#define MLP_OUT_GREATEST MLP_HID // The greatest number of outputs from any single layer
41
42ae2f_AnnMlp_t mlp;
43ae2f_AnnSlp_t slplast;
44
45const size_t mlp_szv[] = { MLP_IN, MLP_HID, MLP_OUT };
46
47// --- Corrected Memory Layout ---
48// The MLP functions expect a flat, padded memory layout for weights and biases.
49// Each layer's weights are expected to start at a fixed stride.
50#define MLP_WEIGHT_STRIDE (MLP_OUT_GREATEST * MLP_OUT_GREATEST)
51#define MLP_BIAS_STRIDE (MLP_OUT_GREATEST)
52
55
58// --- End Corrected Memory Layout ---
59
60ae2f_AnnAct_t* mlp_acts[MLP_DEPTH - 1] = { Act, Act };
61ae2f_AnnAct_t* mlp_actderivs[MLP_DEPTH - 1] = { ActDeriv, ActDeriv };
62
63size_t i, j, k;
64
65union TEST_STACK {
66 ae2f_AnnMlpPredictStream_t m_predictsteam;
67 ae2f_AnnSlpFetchDelta_t m_fetch;
68 ae2f_AnnMlpPropagateAll_t m_propagate;
69} __test_stack;
70
/*
 * Trains a 2-3-1 MLP on the XOR truth table and prints predictions before
 * and after 9000 epochs of per-sample (stochastic) updates.
 *
 * NOTE(review): this listing is a damaged Doxygen extraction. The
 * __ae2f_*_imp macro names at the call sites below were rendered as links
 * and lost, so several statements show only argument lines, and some
 * argument lines (listing lines 107, 109, 118, 120, 123, 128, 130-132, 134,
 * 142, 144) are missing entirely. The code bytes are preserved as-is;
 * consult the original MlpTrainXOR-Primal.c before compiling.
 */
71int main() {
72 puts("MlpTrainXOR-Primal start");
73
74 puts("Configuring mlp");
75 mlp.m_outc = MLP_OUT_GREATEST;
76 mlp.m_depth = MLP_DEPTH;
77
78 puts("Configuring last slp");
79 slplast.m_inc = MLP_HID;
80 slplast.m_outc = MLP_OUT;
81
/* Fixed seed: the run is deliberately deterministic/reproducible. */
82 srand(0);
83
84 puts("Initializing weights randomly with correct memory layout");
85 // Layer 0: 2 inputs -> 3 neurons
86 size_t weight_base_l0 = 0 * MLP_WEIGHT_STRIDE;
87 size_t bias_base_l0 = 0 * MLP_BIAS_STRIDE;
/* Uniform random init in [-0.5, 0.5) for weights and biases. */
88 for (i = 0; i < mlp_szv[1]; i++) { // 3 output neurons
89 for (k = 0; k < mlp_szv[0]; k++) { // 2 input weights
90 mlp_weights[weight_base_l0 + i * mlp_szv[0] + k] = ((double)rand() / RAND_MAX) - 0.5;
91 }
92 mlp_bias[bias_base_l0 + i] = ((double)rand() / RAND_MAX) - 0.5;
93 }
94
95 // Layer 1: 3 inputs -> 1 neuron
96 size_t weight_base_l1 = 1 * MLP_WEIGHT_STRIDE;
97 size_t bias_base_l1 = 1 * MLP_BIAS_STRIDE;
98 for (i = 0; i < mlp_szv[2]; i++) { // 1 output neuron
99 for (k = 0; k < mlp_szv[1]; k++) { // 3 input weights
100 mlp_weights[weight_base_l1 + i * mlp_szv[1] + k] = ((double)rand() / RAND_MAX) - 0.5;
101 }
102 mlp_bias[bias_base_l1 + i] = ((double)rand() / RAND_MAX) - 0.5;
103 }
104
105 puts("See first output (before training)");
106 for(i = 0; i < 4; ++i) {
/* NOTE(review): the opening line here (listing line 107, presumably
 * `__ae2f_AnnMlpPredictStream_imp(`) and the trailing arguments (line 109,
 * likely the weight/bias/outstream/act arrays) were lost in extraction. */
108 __test_stack.m_predictsteam, mlp, inp[i], output, mlp_szv,
110 );
111 printf("Initial Output for [%d, %d]: %f (goal: %d)\n"
112 , (int)inp[i][0], (int)inp[i][1], output[0], (int)goal_xor[i]);
113 }
114
115 puts("Training...");
116 for(j = 0; j < 9000; ++j) {
117 for(i = 0; i < 4; ++i) {
/* Forward pass -- macro name stripped (see note above). */
119 __test_stack.m_predictsteam, mlp, inp[i], output, mlp_szv,
121 );
122
/* Output-layer delta -- opening `__ae2f_AnnSlpFetchDelta_imp(` line
 * (listing line 123) was lost in extraction. */
124 __test_stack.m_fetch, slplast, output, &goal_xor[i],
125 ActDeriv, LossDeriv, &mlp_deltastream[MLP_OUT_GREATEST]
126 );
127
/* Backpropagation/weight update -- opening macro line lost; the symbol
 * index suggests `__ae2f_AnnMlpFollow_imp`. The two constants on the
 * next surviving line are presumably learning rates for weights/biases
 * -- TODO confirm against the macro's parameter list. */
129 __test_stack.m_propagate, mlp, inp[i]
133 0.6, 0.5,
135 );
136 }
137 }
138 puts("Training complete.");
139
140 puts("See last output after training");
141 for(i = 0; i < 4; ++i) {
/* Forward pass -- macro name stripped (see note above). */
143 __test_stack.m_predictsteam, mlp, inp[i], output, mlp_szv,
145 );
146 printf("Final Output for [%d, %d]: %f (goal: %d)\n"
147 , (int)inp[i][0], (int)inp[i][1], output[0], (int)goal_xor[i]
148 );
149 }
150
151 return 0;
152}
void ae2f_AnnAct_t(ae2f_float_t *ret, ae2f_float_t x)
Customisable activation function type.
Definition Act.h:19
#define ae2f_float
Predefined floating point type.
Definition Float.auto.h:17
ae2f_float ae2f_float_t
Definition Float.h:38
#define MLP_OUT_GREATEST
#define MLP_IN
#define MLP_WEIGHT_STRIDE
ae2f_float_t output[1]
const ae2f_float_t goal_xor[4]
ae2f_float_t mlp_weights[(MLP_DEPTH - 1) *MLP_WEIGHT_STRIDE]
ae2f_AnnSlp_t slplast
#define MLP_BIAS_STRIDE
ae2f_float_t mlp_bias[(MLP_DEPTH - 1) *MLP_BIAS_STRIDE]
ae2f_float_t mlp_outstream[(MLP_DEPTH - 1) *MLP_OUT_GREATEST]
#define MLP_HID
const size_t mlp_szv[]
ae2f_AnnMlp_t mlp
const ae2f_float_t inp[4][2]
ae2f_AnnAct_t * mlp_acts[MLP_DEPTH - 1]
ae2f_float_t mlp_deltastream[(MLP_DEPTH - 1) *MLP_OUT_GREATEST]
#define MLP_OUT
ae2f_AnnAct_t * mlp_actderivs[MLP_DEPTH - 1]
#define MLP_DEPTH
#define __ae2f_AnnMlpPredictStream_imp(...)
Definition Mlp.auto.h:491
#define __ae2f_AnnMlpFollow_imp(...)
Definition Mlp.auto.h:549
#define __ae2f_AnnSlpFetchDelta_imp(v_delta, slp, out, out_desired, actderiv_opt, lossderiv, retdelta)
Definition Slp.auto.h:538
ae2f_AnnMlpFollow_t m_propagate
ae2f_AnnMlpPredictStream_t m_predictsteam
ae2f_AnnSlpFetchDelta_t m_fetch