/* ae2f_docs — example: MlpTrainXOR-Primal.c
 * (page title residue from documentation extraction, preserved as a comment) */
1#define ae2f_NEED_CLASS 0
2
3#include <ae2f/Ann/Act.h>
4#include <ae2f/Ann/Mlp.h>
5#include <ae2f/Ann/Slp.h>
6
7#include <assert.h>
8#include <stdio.h>
9#include <math.h>
10#include <stdlib.h>
11#include <time.h>
12
13static void Act(ae2f_float* r, const ae2f_float_t* x, size_t i, size_t c) {
14 *r = 1.0 / (1.0 + exp(-x[i]));
15}
16
17static void ActDeriv(ae2f_float_t* r, const ae2f_float_t* output, size_t i, size_t c) {
18 *r = output[i] * (1.0 - output[i]);
19}
20
21static void LossDeriv(ae2f_float_t* r, const ae2f_float_t* output, const ae2f_float_t* target, size_t i, size_t c) {
22 *r = ((output[i] - target[i]) / c);
23}
24
25const ae2f_float_t prm_inp[4][2] = {
26 {0, 0},
27 {0, 1},
28 {1, 0},
29 {1, 1}
30};
31
32const ae2f_float_t goal_xor[4] = {0, 1, 1, 0};
33
34ae2f_float_t output[1] = { 0 };
35
36#define MLP_DEPTH 3
37#define MLP_IN 2
38#define MLP_HID 3
39#define MLP_OUT 1
40#define MLP_OUT_GREATEST MLP_HID // The greatest number of outputs from any single layer
41
42ae2f_AnnMlp_t mlp;
43ae2f_AnnSlp_t slplast;
44
45const size_t mlp_szv[] = { MLP_IN, MLP_HID, MLP_OUT };
46
47// --- Corrected Memory Layout ---
48// The MLP functions expect a flat, padded memory layout for weights and biases.
49// Each layer's weights are expected to start at a fixed stride.
50#define MLP_WEIGHT_STRIDE (MLP_OUT_GREATEST * MLP_OUT_GREATEST)
51#define MLP_BIAS_STRIDE (MLP_OUT_GREATEST)
52
53ae2f_float_t mlp_weights[(MLP_DEPTH - 1) * MLP_WEIGHT_STRIDE] = {0};
54ae2f_float_t mlp_bias[(MLP_DEPTH - 1) * MLP_BIAS_STRIDE] = {0};
55
56ae2f_float_t mlp_outstream[(MLP_DEPTH - 1) * MLP_OUT_GREATEST] = {0};
57ae2f_float_t mlp_deltastream[(MLP_DEPTH - 1) * MLP_OUT_GREATEST] = {0};
58// --- End Corrected Memory Layout ---
59
60ae2f_AnnActFFN_t* mlp_acts[MLP_DEPTH - 1] = { Act, Act };
61ae2f_AnnActFFN_t* mlp_actderivs[MLP_DEPTH - 1] = { ActDeriv, ActDeriv };
62
63size_t i, j, k;
64
66 ae2f_AnnMlpPredictStream_t m_predictsteam;
67 ae2f_AnnSlpFetchDelta_t m_fetch;
68 ae2f_AnnMlpBwdAll_t m_Bwd;
69} __test_stack;
70
71int main() {
72 puts("MlpTrainXOR-Primal start");
73
74 puts("Configuring mlp");
75 mlp.m_outc = MLP_OUT_GREATEST;
76 mlp.m_depth = MLP_DEPTH;
77
78 puts("Configuring last slp");
79 slplast.m_inc = MLP_HID;
80 slplast.m_outc = MLP_OUT;
81
82 srand(0);
83
84 puts("Initializing weights randomly with correct memory layout");
85 // Layer 0: 2 prm_inputs -> 3 neurons
86 size_t weight_base_l0 = 0 * MLP_WEIGHT_STRIDE;
87 size_t bias_base_l0 = 0 * MLP_BIAS_STRIDE;
88 for (i = 0; i < mlp_szv[1]; i++) { // 3 output neurons
89 for (k = 0; k < mlp_szv[0]; k++) { // 2 prm_input weights
90 mlp_weights[weight_base_l0 + i * mlp_szv[0] + k] = ((double)rand() / RAND_MAX) - 0.5;
91 }
92 mlp_bias[bias_base_l0 + i] = ((double)rand() / RAND_MAX) - 0.5;
93 }
94
95 // Layer 1: 3 prm_inputs -> 1 neuron
96 size_t weight_base_l1 = 1 * MLP_WEIGHT_STRIDE;
97 size_t bias_base_l1 = 1 * MLP_BIAS_STRIDE;
98 for (i = 0; i < mlp_szv[2]; i++) { // 1 output neuron
99 for (k = 0; k < mlp_szv[1]; k++) { // 3 prm_input weights
100 mlp_weights[weight_base_l1 + i * mlp_szv[1] + k] = ((double)rand() / RAND_MAX) - 0.5;
101 }
102 mlp_bias[bias_base_l1 + i] = ((double)rand() / RAND_MAX) - 0.5;
103 }
104
105 puts("See first output (before training)");
106 for(i = 0; i < 4; ++i) {
108 __test_stack.m_predictsteam, mlp, prm_inp[i], output, mlp_szv,
109 mlp_weights, mlp_bias, mlp_outstream, mlp_acts
110 );
111 printf("Initial Output for [%d, %d]: %f (goal: %d)\n"
112 , (int)prm_inp[i][0], (int)prm_inp[i][1], output[0], (int)goal_xor[i]);
113 }
114
115 puts("Training...");
116 for(j = 0; j < 9000; ++j) {
117 for(i = 0; i < 4; ++i) {
119 __test_stack.m_predictsteam, mlp, prm_inp[i], output, mlp_szv,
120 mlp_weights, mlp_bias, mlp_outstream, mlp_acts
121 );
122
124 __test_stack.m_fetch, slplast
125 , output
126 , &goal_xor[i]
127 , ActDeriv, LossDeriv, &mlp_deltastream[MLP_OUT_GREATEST]
128 );
129
131 __test_stack.m_Bwd, mlp, prm_inp[i]
132 , &mlp_deltastream[MLP_OUT_GREATEST],
133 mlp_szv, mlp_outstream, mlp_deltastream,
134 mlp_weights, mlp_bias,
135 0.6, 0.5,
136 mlp_actderivs
137 );
138 }
139 }
140 puts("Training complete.");
141
142 puts("See last output after training");
143 for(i = 0; i < 4; ++i) {
145 __test_stack.m_predictsteam, mlp, prm_inp[i], output, mlp_szv,
146 mlp_weights, mlp_bias, mlp_outstream, mlp_acts
147 );
148 printf("Final Output for [%d, %d]: %f (goal: %d)\n"
149 , (int)prm_inp[i][0], (int)prm_inp[i][1], output[0], (int)goal_xor[i]
150 );
151 }
152
153 return 0;
154}
/* Doxygen cross-reference residue from the extracted page, preserved:
 *   ae2f_float — Float.auto.h:17
 *   MLP_OUT_GREATEST, MLP_IN, MLP_WEIGHT_STRIDE, MLP_BIAS_STRIDE,
 *   MLP_HID, MLP_OUT, MLP_DEPTH — defined above in this example
 *   __ae2f_AnnMlpPredictStream_imp(v_predict, mlp, inp, out, sz, weight, bias, outcache, act_opt) — Mlp.auto.h:554
 *   __ae2f_AnnMlpFollow_imp(v_follow, mlp, inp, delta, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, actderiv) — Mlp.auto.h:612
 *   __ae2f_AnnSlpFetchDelta_imp(tmp_delta, prm_slp, pprm_out, pprm_out_desired, fn_actderiv, fn_lossderiv, pret_delta) — Slp.auto.h:586
 */