ae2f_docs
MlpTrainXOR-Made-Primal.c
Go to the documentation of this file.
1#define ae2f_NEED_CLASS 0
2
3#include <assert.h>
4#include <ae2f/Ann/Act.h>
5
6#include <ae2f/Ann/Mlp.h>
7#include <ae2f/Ann/Slp.h>
8#include <stdio.h>
9#include <math.h>
10#include <stdlib.h>
11#include <time.h>
12
13static void Act(ae2f_float_t* r, ae2f_float_t x) {
14 *(r) = 1.0 / (1.0 + exp(-x));
15}
16
17static void ActDeriv(ae2f_float_t* r, ae2f_float_t output) {
18 *r = output * (1.0 - output);
19}
20
21static void LossDeriv(ae2f_float_t* r, const ae2f_float_t* output, const ae2f_float_t* target, size_t i, size_t c) {
22 *r = ((output[i] - target[i]) / c);
23}
24
25const ae2f_float_t inp[4][2] = {
26 {0, 0},
27 {0, 1},
28 {1, 0},
29 {1, 1}
30};
31
32const ae2f_float_t goal_xor[4] = {0, 1, 1, 0};
33
35
// Network shape: MLP_DEPTH layer-size entries (node counts), i.e. three
// weighted stages for a depth-4 size vector.
36#define MLP_DEPTH 4
37#define MLP_IN 2
38#define MLP_HID 3
39#define MLP_OUT 1
40#define MLP_OUT_GREATEST MLP_HID // The greatest number of outputs from any single layer
41
42ae2f_AnnMlp_t mlp;
43ae2f_AnnSlp_t slplast;
44
// NOTE(review): {MLP_IN, MLP_HID, 1, MLP_OUT} = {2, 3, 1, 1} — main() only
// initializes the first two weighted stages, so confirm the extra "1" stage
// between MLP_HID and MLP_OUT is intentional.
45const size_t mlp_szv[] = { MLP_IN, MLP_HID, 1, MLP_OUT };
46
47// --- Corrected Memory Layout ---
48// The MLP functions expect a flat, padded memory layout for weights and biases.
49// Each layer's weights are expected to start at a fixed stride.
50#define MLP_WEIGHT_STRIDE (MLP_OUT_GREATEST * MLP_OUT_GREATEST)
51#define MLP_BIAS_STRIDE (MLP_OUT_GREATEST)
52
// NOTE(review): source lines 53-57 were elided by the documentation export;
// per the symbol index they declare output[], mlp_weights[], mlp_bias[],
// mlp_outstream[] and mlp_deltastream[] — restore before compiling.
55
58// --- End Corrected Memory Layout ---
59
// One activation (and derivative) per weighted stage.
60ae2f_AnnAct_t* mlp_acts[MLP_DEPTH - 1] = { Act, Act, Act };
61ae2f_AnnAct_t* mlp_actderivs[MLP_DEPTH - 1] = { ActDeriv, ActDeriv, ActDeriv };
62
// Shared loop counters used throughout main().
63size_t i, j, k;
64
// NOTE(review): the struct opener (source line 65, presumably
// "static struct {") was dropped by the export — restore before compiling.
// Scratch/operation handles for the prediction and training macros below.
66 ae2f_AnnMlpPredictStream_t m_predictsteam;
67 ae2f_AnnSlpFetchDelta_t m_fetch;
68 ae2f_AnnMlpFollow_t m_propagate;
69 ae2f_AnnMlpTrain_t m_train;
70} __test_stack;
71
// Trains a small MLP on the XOR truth table using the ae2f Ann primal-call
// API: configure mlp/slplast, seed the first two weight stages randomly,
// print pre-training predictions, run 99000 epochs over the 4 samples,
// then print post-training predictions. Returns 0.
//
// NOTE(review): several macro-invocation lines were elided by the
// documentation export (marked below); the code as shown is incomplete.
72int main() {
73 puts("MlpTrainXOR-Primal start");
74
75 puts("Configuring mlp");
76 mlp.m_outc = MLP_OUT_GREATEST;
77 mlp.m_depth = MLP_DEPTH;
78
79 puts("Configuring last slp");
80 slplast.m_inc = MLP_HID;
81 slplast.m_outc = MLP_OUT;
82
// Fixed seed so every run produces the same initial weights.
83 srand(0);
84
85 puts("Initializing weights randomly with correct memory layout");
86 // Layer 0: 2 inputs -> 3 neurons
87 size_t weight_base_l0 = 0 * MLP_WEIGHT_STRIDE;
88 size_t bias_base_l0 = 0 * MLP_BIAS_STRIDE;
89 for (i = 0; i < mlp_szv[1]; i++) { // 3 output neurons
90 for (k = 0; k < mlp_szv[0]; k++) { // 2 input weights
// Uniform values in [-0.5, 0.5).
91 mlp_weights[weight_base_l0 + i * mlp_szv[0] + k] = ((double)rand() / RAND_MAX) - 0.5;
92 }
93 mlp_bias[bias_base_l0 + i] = ((double)rand() / RAND_MAX) - 0.5;
94 }
95
96 // Layer 1: 3 inputs -> 1 neuron
97 size_t weight_base_l1 = 1 * MLP_WEIGHT_STRIDE;
98 size_t bias_base_l1 = 1 * MLP_BIAS_STRIDE;
99 for (i = 0; i < mlp_szv[2]; i++) { // 1 output neuron
100 for (k = 0; k < mlp_szv[1]; k++) { // 3 input weights
101 mlp_weights[weight_base_l1 + i * mlp_szv[1] + k] = ((double)rand() / RAND_MAX) - 0.5;
102 }
103 mlp_bias[bias_base_l1 + i] = ((double)rand() / RAND_MAX) - 0.5;
104 }
105
106 puts("See first output (before training)");
107 for(i = 0; i < 4; ++i) {
// NOTE(review): the macro name opening this call (source line 108,
// presumably __ae2f_AnnMlpPredictStream_imp) and its trailing argument
// line (110) were elided by the export.
109 __test_stack.m_predictsteam, mlp, inp[i], output, mlp_szv,
111 );
112 printf("Initial Output for [%d, %d]: %f (goal: %d)\n"
113 , (int)inp[i][0], (int)inp[i][1], output[0], (int)goal_xor[i]);
114 }
115
116 puts("Training...");
117 for(j = 0; j < 99000; ++j) {
118 for(i = 0; i < 4; ++i) {
// Disabled branch: the manual predict / fetch-delta / follow sequence kept
// for reference; the active branch below uses the combined train macro.
119#if 0
120 __ae2f_AnnMlpPredictStream_imp(
121 __test_stack.m_predictsteam, mlp, inp[i], output, mlp_szv,
122 mlp_weights, mlp_bias, mlp_outstream, mlp_acts
123 );
124
125 __ae2f_AnnSlpFetchDelta_imp(
126 __test_stack.m_fetch, slplast, output, &goal_xor[i],
127 ActDeriv, LossDeriv, &mlp_deltastream[MLP_OUT_GREATEST]
128 );
129
130 __ae2f_AnnMlpFollow_imp(
131 __test_stack.m_propagate, mlp, inp[i]
132 , &mlp_deltastream[MLP_OUT_GREATEST],
133 mlp_szv, mlp_outstream, mlp_deltastream,
134 mlp_weights, mlp_bias,
135 0.6, 0.5,
136 mlp_actderivs
137 );
138#else
// NOTE(review): the macro name opening this call (source line 139,
// presumably __ae2f_AnnMlpTrainPrimal_imp per the symbol index) and the
// argument lines 143-144 (lenv/outstream/deltacache/weight/bias) were
// elided by the export. 0.2 / 0.3 are the weight / bias learning rates.
140 -1, ae2f_NONE
141 , __test_stack.m_train, mlp
142 , inp[i], output, &goal_xor[i]
145 , 0.2, 0.3
146 , mlp_acts, mlp_actderivs, LossDeriv
147 );
148#endif
149 }
150 }
151 puts("Training complete.");
152
153 puts("See last output after training");
154 for(i = 0; i < 4; ++i) {
// NOTE(review): same elision as above — the predict-stream macro name
// (source line 155) and trailing argument line (157) are missing.
156 __test_stack.m_predictsteam, mlp, inp[i], output, mlp_szv,
158 );
159 printf("Final Output for [%d, %d]: %f (goal: %d)\n"
160 , (int)inp[i][0], (int)inp[i][1], output[0], (int)goal_xor[i]
161 );
162 }
163
164 return 0;
165}
void ae2f_AnnAct_t(ae2f_float_t *ret, ae2f_float_t x)
Customisable activation function type.
Definition Act.h:19
#define ae2f_NONE
Literally nothing.
Definition Cxx.h:13
ae2f_float ae2f_float_t
Definition Float.h:38
#define MLP_OUT_GREATEST
#define MLP_IN
#define MLP_WEIGHT_STRIDE
ae2f_float_t output[1]
const ae2f_float_t goal_xor[4]
ae2f_float_t mlp_weights[(MLP_DEPTH - 1) *MLP_WEIGHT_STRIDE]
ae2f_AnnSlp_t slplast
#define MLP_BIAS_STRIDE
ae2f_float_t mlp_bias[(MLP_DEPTH - 1) *MLP_BIAS_STRIDE]
ae2f_float_t mlp_outstream[(MLP_DEPTH - 1) *MLP_OUT_GREATEST]
#define MLP_HID
const size_t mlp_szv[]
ae2f_AnnMlp_t mlp
const ae2f_float_t inp[4][2]
ae2f_AnnAct_t * mlp_acts[MLP_DEPTH - 1]
ae2f_float_t mlp_deltastream[(MLP_DEPTH - 1) *MLP_OUT_GREATEST]
#define MLP_OUT
ae2f_AnnAct_t * mlp_actderivs[MLP_DEPTH - 1]
#define MLP_DEPTH
#define __ae2f_AnnMlpPredictStream_imp(...)
Definition Mlp.auto.h:491
#define __ae2f_AnnMlpTrainPrimal_imp(OPER_NEG, OPER_NONE, v_train, mlp, inp, out, out_desired, lenv, outstream, deltacache, weight, bias, learningrate, learningrate_bias, act, actderiv, lossderiv)
Definition Mlp.auto.h:767
ae2f_AnnMlpFollow_t m_propagate
ae2f_AnnMlpPredictStream_t m_predictsteam
ae2f_AnnSlpFetchDelta_t m_fetch
ae2f_AnnMlpTrain_t m_train