ae2f_docs
Mlp.def.cc
Go to the documentation of this file.
1#include <ae2f/Ann/Util.h>
2
3#if !ae2f_MAC_BUILD || !__ae2f_MACRO_GENERATED
4#include <assert.h>
5#include <stdlib.h>
6#endif
7
8#if !__ae2f_MACRO_GENERATED
9#include <ae2f/Macro.h>
10#include <ae2f/Ann/Slp.h>
11#endif
12
13#ifndef ae2f_Ann_Mlp_c
14
16#include <ae2f/Ann/Mlp.h>
17#endif
18
20#define OPER_NEG
21#define OPER_NONE
22#endif
23
24#define ae2f_Ann_Mlp_c
25
27
28#define __ae2f_AnnMlpDel_C(a) free(ae2f_reinterpret_cast(void*, a))
29
30ae2f_MAC() _ae2f_AnnMlpMk_C(
31 ae2f_err_t* const reterr
32 , ae2f_AnnMlp** const retmk
33
34 , const size_t depth
35 , const size_t* const szvector
36 , ae2f_opt size_t* const szswap_opt
37
38 , ae2f_opt ae2f_AnnAct_t** const act
39 , ae2f_opt ae2f_AnnAct_t** const actderiv
40 , ae2f_AnnLoss_t* const lossderiv
41
42 , ae2f_opt ae2f_float_t* const deltastream
43 , ae2f_opt ae2f_float_t* const outcache
44 , ae2f_opt ae2f_float_t* const weight
45 , ae2f_opt ae2f_float_t* const bias
46
47 , ae2f_float_t const learningrate
48 , ae2f_float_t const learningrate_bias
49
50 , ae2f_opt const size_t offset
51, ae2f_opt const size_t extra
52) {
53 if((reterr) && *(reterr)) {}
54 else unless((szvector) && (lossderiv) && (retmk))
55 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
56 else {
57 ae2f_AnnMlpMk_t v_mk;
58 __ae2f_AnnMlpMk_imp(
59 v_mk
60 , depth, szvector, szswap_opt
61 , act, actderiv, lossderiv
62 , deltastream, outcache, weight
63 , bias, learningrate, learningrate_bias
64 , offset, extra
65 );
66
67 assert(v_mk.m_mkbase && "Initialising has failed");
68 *(retmk) = v_mk.m_mkbase;
69 unless(v_mk.m_mkbase) {
70 (reterr) && (*(reterr) |= ae2f_errGlob_ALLOC_FAILED);
71 }
72 }
73}
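/* Note added for this listing (not part of the original Mlp.def.cc): the _C
 * variants above follow the library's error-chaining convention: when *reterr
 * is already non-zero the call degrades to a no-op, and any failure is OR-ed
 * into *reterr. A minimal caller sketch, assuming hypothetical public wrappers
 * ae2f_AnnMlpMk / ae2f_AnnMlpDel that expand to __ae2f_AnnMlpMk_C and
 * __ae2f_AnnMlpDel_C:
 *
 *   ae2f_err_t err = 0;
 *   ae2f_AnnMlp* mlp = 0;
 *   ae2f_AnnMlpMk(&err, &mlp, depth, szvector, 0, 0, 0, lossderiv,
 *                 0, 0, 0, 0, lr, lr_bias, 0, 0);
 *   if (err) { ... }              inspect the ae2f_errGlob_* bits
 *   ae2f_AnnMlpDel(mlp);
 */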
74
75#else
76
77#define __ae2f_AnnMlpDel_C(...)
78 typedef char NO_ae2f_NEED_CLASS[-1]
79
80#define __ae2f_AnnMlpMk_C(...)
81 typedef char NO_ae2f_NEED_CLASS[-1]
82
83
84#endif
85
87
88ae2f_MAC() _ae2f_AnnMlpMk_imp(
89 ae2f_AnnMlpMk_t v_mk
90 , const size_t depth
91
92 , const size_t* const szvector
93 , size_t* const szswap_opt
94
95 , ae2f_opt ae2f_AnnAct_t** const act
96 , ae2f_opt ae2f_AnnAct_t** const actderiv
97 , ae2f_AnnLoss_t* const lossderiv
98
99 , ae2f_opt ae2f_float_t* const deltastream
100 , ae2f_opt ae2f_float_t* const outcache
101 , ae2f_opt ae2f_float_t* const weight
102 , ae2f_opt ae2f_float_t* const bias
103
104 , ae2f_float_t const learningrate
105 , ae2f_float_t const learningrate_bias
106
107 , ae2f_opt const size_t offset
108 , ae2f_opt const size_t extra
109)
110{
111 assert((szvector) && "Size vector is null");
112 assert((lossderiv) && "Loss deriv must be initialised");
113 assert((depth) > 2 && "Depth must be greater than 2");
114
115 (v_mk).m_outc = 2;
116 for((v_mk).m_i = (depth); (v_mk).m_i--; ) {
117 assert((szvector)[(v_mk).m_i] && "Zero value is not permitted");
118 (v_mk).m_outc < (szvector)[(v_mk).m_i] && ((v_mk).m_outc = (szvector)[(v_mk).m_i]);
119 }
120
121 __ae2f_AnnMlpSz_imp(
122 (v_mk).m_i
123 , (v_mk).m_outc
124 , depth, szswap_opt
125 , act, actderiv
126 , deltastream, outcache
127 , weight, bias
128 );
129
130 (v_mk).m_mkptr.m_void = calloc(1, (v_mk).m_i + (offset) + (extra));
131 (v_mk).m_mkbase = (v_mk).m_mkptr.m_mlp;
132
133 if((v_mk).m_mkptr.m_void) {
134 __ae2f_AnnMlpInitWithOutSz_imp(
135 *(v_mk).m_mkptr.m_mlp
136 , (v_mk).m_i
137 , depth
138 , (v_mk).m_outc
139 , szvector
140 , (szswap_opt) ? (szswap_opt) : ae2f_reinterpret_cast(
141 size_t*
142 , (v_mk).m_mkptr.m_mlp + 1)
143 , act
144 , actderiv
145 , lossderiv
146 , deltastream
147 , outcache
148 , weight
149 , bias
150 , learningrate
151 , learningrate_bias
152 );
153
154 (v_mk).m_mkptr.m_mlp += 1;
155 (v_mk).m_mkptr.m_byte += (offset);
156 (v_mk).m_mkptr.m_sz += (depth);
157
158 unless(act) {
159 (v_mk).m_mkbase->m_act = (v_mk).m_mkptr.m_Act;
160 (v_mk).m_mkptr.m_Act += (depth) - 1;
161 }
162
163 unless(actderiv) {
164 (v_mk).m_mkbase->m_actderiv = (v_mk).m_mkptr.m_Act;
165 (v_mk).m_mkptr.m_Act += (depth) - 1;
166 }
167
168 unless(deltastream) {
169 (v_mk).m_mkbase->m_deltastream = (v_mk).m_mkptr.m_f;
170 (v_mk).m_mkptr.m_f += ((depth) - 1) * (v_mk).m_outc;
171 }
172
173 unless(outcache) {
174 (v_mk).m_mkbase->m_outcache = (v_mk).m_mkptr.m_f;
175 (v_mk).m_mkptr.m_f += ((depth) - 1) * (v_mk).m_outc;
176 }
177
178 unless(bias) {
179 (v_mk).m_mkbase->m_bias = (v_mk).m_mkptr.m_f;
180 (v_mk).m_mkptr.m_f += ((depth) - 1) * (v_mk).m_outc;
181 }
182
183 unless(weight) {
184 (v_mk).m_mkbase->m_weight = (v_mk).m_mkptr.m_f;
185 }
186 }
187}
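/* Note added for this listing (not part of the original Mlp.def.cc): sketch of
 * how the single calloc block is carved up above. The cursor m_mkptr walks past
 * the header, `offset` bytes and `depth` size_t slots, and every optional
 * buffer the caller left NULL is then taken from the remainder, in this order:
 *
 *   [ae2f_AnnMlp][offset bytes][size_t x depth]
 *   [act x (depth-1)][actderiv x (depth-1)]
 *   [deltastream][outcache][bias]          each (depth-1) * outc floats
 *   [weight ...][extra bytes]
 *
 * Buffers the caller did supply keep pointing at the caller's memory; no space
 * is reserved for them (see __ae2f_AnnMlpSz_imp below).
 */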
188
189ae2f_MAC() _ae2f_AnnMlpSz_imp(
190 size_t ret_sz
191 , const size_t outc
192 , const size_t depth
193 , ae2f_opt const size_t* const szswap
194 , ae2f_opt ae2f_AnnAct_t** const act
195 , ae2f_opt ae2f_AnnAct_t** const actderiv
196 , ae2f_opt ae2f_float_t* const deltastream
197 , ae2f_opt ae2f_float_t* const outcache
198 , ae2f_opt ae2f_float_t* const weight
199 , ae2f_opt ae2f_float_t* const bias
200 )
201{
202 assert((depth) > 2);
203
204 (ret_sz) = sizeof(ae2f_AnnMlp) + (!(szswap)) * sizeof(size_t);
205 (ret_sz) += (sizeof(void*) * ((depth) - 1)) * (!(act) + !(actderiv));
206 (ret_sz) += sizeof(ae2f_float_t)
207 * ((depth)) * (outc)
208 * (/**/
209 !(deltastream) + !(outcache) + !(bias)
210 + ((!(weight)) * (outc))
211 );
212}
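/* Note added for this listing (not part of the original Mlp.def.cc): the size
 * computed above, written out. With d = depth and o = outc (the widest layer):
 *
 *   ret_sz = sizeof(ae2f_AnnMlp)
 *          + (szswap == NULL) * sizeof(size_t)
 *          + (d - 1) * sizeof(void*) * ((act == NULL) + (actderiv == NULL))
 *          + d * o * sizeof(ae2f_float_t)
 *            * ( (deltastream == NULL) + (outcache == NULL) + (bias == NULL)
 *              + (weight == NULL) * o );
 *
 * For example, d = 3, o = 4 and every optional buffer NULL gives
 * 3 * 4 * (1 + 1 + 1 + 4) = 84 floats on top of the header bookkeeping.
 */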
213
214ae2f_MAC() _ae2f_AnnMlpInitWithOutSz_imp(
215 ae2f_AnnMlp v_mlp
216 , size_t v_init
217 , const size_t depth
218 , const size_t outsz
219
220 , const size_t* const szvector
221 , size_t* const szswap_opt
222
223 , ae2f_opt ae2f_AnnAct_t** const act
224 , ae2f_opt ae2f_AnnAct_t** const actderiv
225 , ae2f_AnnLoss_t* const lossderiv
226
227 , ae2f_opt ae2f_float_t* const deltastream
228 , ae2f_opt ae2f_float_t* const outcache
229 , ae2f_opt ae2f_float_t* const weight
230 , ae2f_opt ae2f_float_t* const bias
231
232 , ae2f_float_t const learningrate
233 , ae2f_float_t const learningrate_bias
234 )
235{
236 assert((depth) >= 2 && "At least you need input and output layers");
237 (v_mlp).m_depth = (depth);
238 (v_mlp).m_outc = (outsz);
239
240 assert((lossderiv) && "loss deriv is null");
241 (v_mlp).m_lossderiv = lossderiv;
242
243 (v_mlp).m_sz = (szswap_opt);
244 (v_mlp).m_act = (act);
245 (v_mlp).m_actderiv = (actderiv);
246
247 (v_mlp).m_deltastream = deltastream;
248 (v_mlp).m_outcache = outcache;
249 (v_mlp).m_weight = weight;
250 (v_mlp).m_bias = bias;
251
252 (v_mlp).m_learningrate = learningrate;
253 (v_mlp).m_learningrate_bias = learningrate_bias;
254
255 if((szswap_opt) && (szswap_opt) != (szvector))
256 for((v_init) = (depth); (v_init)--; ) {
257 (szswap_opt)[(v_init)] = (szvector)[(v_init)];
258 }
259}
260
261ae2f_MAC() _ae2f_AnnMlpInit_imp(
262 ae2f_AnnMlp v_mlp
263 , ae2f_AnnMlpInit_t v_init
264 , const size_t depth
265 , const size_t* const szvector
266 , size_t* const szswap_opt
267
268 , ae2f_opt ae2f_AnnAct_t** const act
269 , ae2f_opt ae2f_AnnAct_t** const actderiv
270 , ae2f_AnnLoss_t* const lossderiv
271
272 , ae2f_opt ae2f_float_t* const deltastream
273 , ae2f_opt ae2f_float_t* const outcache
274 , ae2f_opt ae2f_float_t* const weight
275 , ae2f_opt ae2f_float_t* const bias
276
277 , ae2f_float_t const learningrate
278 , ae2f_float_t const learningrate_bias
279 )
280{
281 (v_init).m_outc = 0;
282
283 assert((szvector) && "Size vector is null");
284 for((v_init).m_i = (depth); (v_init).m_i--; ) {
285 assert((szvector)[(v_init).m_i] && "Zero value is not permitted");
286 (v_init).m_outc < (szvector)[(v_init).m_i] && ((v_init).m_outc = (szvector)[(v_init).m_i]);
287 }
288
289 __ae2f_AnnMlpInitWithOutSz_imp(
290 v_mlp, (v_init).m_i, depth, (v_init).m_outc
291 , szvector, szswap_opt, act, actderiv, lossderiv
292 , deltastream, outcache, weight, bias, learningrate, learningrate_bias
293 );
294}
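/* Note added for this listing (not part of the original Mlp.def.cc):
 * __ae2f_AnnMlpInit_imp only scans szvector for the widest layer and then
 * delegates to __ae2f_AnnMlpInitWithOutSz_imp with that width. For example a
 * size vector {3, 5, 2} gives outc = 5, so every per-layer row (outcache,
 * deltastream, bias) is 5 floats wide and every weight page is 5 * 5 floats,
 * regardless of the individual layer sizes.
 */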
295
296#else
297
298#define __ae2f_AnnMlpMk_imp(...)
299 typedef char NO_ae2f_NEED_CLASS[-1]
300
301#define __ae2f_AnnMlpSz_imp(...)
302 typedef char NO_ae2f_NEED_CLASS[-1]
303
304#define __ae2f_AnnMlpInitWithOutSz_imp(...)
305 typedef char NO_ae2f_NEED_CLASS[-1]
306
307#define __ae2f_AnnMlpInit_imp(...)
308 typedef char NO_ae2f_NEED_CLASS[-1]
309
310#endif
311
312 /** @brief the number of layers (depth) must be more than 2 */
313ae2f_MAC(OPER_NEG, OPER_NONE, ) _ae2f_AnnMlpPredictPrimal_imp(
314 ae2f_AnnMlpPredict_t v_predict
315 , const ae2f_AnnMlp_t mlp
316
317 , const ae2f_float_t* const inp
318 , ae2f_float_t* const out
319
320 , const size_t* const sz
321 , const ae2f_float_t* const weight
322 , const ae2f_float_t* const bias
323 , ae2f_float_t* const outcache
324
325 , ae2f_AnnAct_t* const * const act_opt
326 )
327{
328 assert((mlp).m_depth > 2);
329
330 (v_predict).m_depth = (mlp).m_depth - 1;
331 (v_predict).m_outc_max = (mlp).m_outc;
332
333 (v_predict).m_inc = (sz)[0];
334 (v_predict).m_outc = (sz)[1];
335
336 if((act_opt)[0]) {
337 __ae2f_AnnSlpPredict_imp(
338 (v_predict)
339 , (v_predict)
340 , inp
341 , ((outcache) + (0 OPER_NONE) * (v_predict).m_outc_max)
342 , weight
343 , bias
344 , (act_opt)[0]
345 );
346 } else {
347 __ae2f_AnnSlpPredict_imp(
348 (v_predict)
349 , (v_predict)
350 , inp
351 , (outcache + (0 OPER_NONE) * (v_predict).m_outc_max)
352 , weight
353 , bias
354 ,
355 );
356 }
357
358
359 for(
360 (v_predict).m_k = 1;
361 (v_predict).m_k < (v_predict).m_depth - 1;
362 (v_predict).m_k++
363 )
364 {
365 (v_predict).m_inc = (v_predict).m_outc;
366 (v_predict).m_outc = (sz)[(v_predict).m_k + 1];
367
368 if((act_opt)[(v_predict).m_k]) {
369 assert((v_predict).m_k);
370 assert(((v_predict).m_k OPER_NEG) != ((v_predict).m_k OPER_NONE));
371 assert(((v_predict).m_k OPER_NEG) == (((v_predict).m_k - 1) OPER_NONE));
372 assert((((v_predict).m_k + 1) OPER_NEG) == (((v_predict).m_k) OPER_NONE));
373
374 __ae2f_AnnSlpPredict_imp(
375 (v_predict)
376 , (v_predict)
377 , ((outcache) + (((v_predict).m_k OPER_NEG) * (v_predict).m_outc_max))
378 , ((outcache) + (((v_predict).m_k OPER_NONE) * (v_predict).m_outc_max))
379 , (weight) + ((v_predict).m_k) * (
380 (v_predict).m_outc_max
381 * (v_predict).m_outc_max
382 )
383 , (bias) + ((v_predict).m_k) * (v_predict).m_outc_max
384 , (act_opt)[(v_predict).m_k]
385 );
386 } else {
387 __ae2f_AnnSlpPredict_imp(
388 (v_predict)
389 , (v_predict)
390 , ((outcache) + (
391 ((v_predict).m_k OPER_NEG)
392 * (v_predict).m_outc_max
393 ))
394 , ((outcache) + (
395 ((v_predict).m_k OPER_NONE)
396 * (v_predict).m_outc_max
397 ))
398 , (weight)
399 + (((v_predict).m_k)
400 * (v_predict).m_outc_max
401 * (v_predict).m_outc_max)
402 , (bias) + ((v_predict).m_k) * (v_predict).m_outc_max
403 ,
404 );
405 }
406 }
407
408 (v_predict).m_inc = (v_predict).m_outc;
409 (v_predict).m_outc = (sz)[(v_predict).m_k + 1];
410 assert((v_predict).m_k == (mlp).m_depth - 2);
411
412 if((act_opt)[(v_predict).m_k]) {
413 __ae2f_AnnSlpPredict_imp(
414 (v_predict)
415 , (v_predict)
416 , ((outcache) + ((((v_predict).m_k OPER_NEG)) * (v_predict).m_outc_max))
417 , (out)
418 , (weight) + ((v_predict).m_k) * (v_predict).m_outc_max * (v_predict).m_outc_max
419 , (bias) + ((v_predict).m_k) * (v_predict).m_outc_max
420 , (act_opt)[(v_predict).m_k]
421 );
422 } else {
423 __ae2f_AnnSlpPredict_imp(
424 (v_predict)
425 , (v_predict)
426 , ((outcache) + ((((v_predict).m_k OPER_NEG)) * (v_predict).m_outc_max))
427 , (out)
428 , ((weight) + ((v_predict).m_k) * (v_predict).m_outc_max * (v_predict).m_outc_max)
429 , ((bias) + ((v_predict).m_k) * (v_predict).m_outc_max)
430 ,
431 );
432 }
433}
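/* Note added for this listing (not part of the original Mlp.def.cc): the
 * forward pass above runs one __ae2f_AnnSlpPredict_imp per weight page k,
 * reading one row of outcache and writing the next (the first layer reads inp
 * and the last writes out instead). The row index comes from the OPER_*
 * parameters: the in-place instantiation (__ae2f_AnnMlpPredict_imp below) uses
 * `k &1 ? 0 : 1` and `k &1`, i.e. two rows of m_outc_max floats in ping-pong
 * fashion, while the stream instantiation (__ae2f_AnnMlpPredictStream_imp)
 * uses `k - 1` and `k`, keeping every layer's activations for training.
 * In outline, for layer k:
 *
 *   in  = outcache + (k OPER_NEG ) * m_outc_max;   previous layer's output
 *   out = outcache + (k OPER_NONE) * m_outc_max;   this layer's output
 *   w   = weight + k * m_outc_max * m_outc_max;    weight page k
 *   b   = bias   + k * m_outc_max;                 bias page k
 */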
434
436ae2f_MAC(OPER_NEG, OPER_NONE, ) _ae2f_AnnMlpPredictPrimal(
437 ae2f_err_t* reterr
438 , const ae2f_AnnMlp* const mlp
439 , const ae2f_float_t* const inp
440 , ae2f_float_t* const out
441 )
442{
443 if((reterr) && *(reterr))
444 ;
445 else unless((mlp) && (inp) && (out)) {
446 assert(0 && "Null");
447 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
448 } else {
449 ae2f_AnnMlpPredict_t v_predict;
450
452 __ae2f_AnnMlpPredictPrimal_imp(OPER_NEG, OPER_NONE
453 , v_predict, *(mlp)
454 , inp, out
455 , (mlp)->m_sz, (mlp)->m_weight
456 , (mlp)->m_bias, (mlp)->m_outcache
457 , (mlp)->m_act
458 );
459 }
460}
461#else
462
463#define __ae2f_AnnMlpPredictPrimal(...)
464 typedef char NO_ae2f_NEED_CLASS[-1]
465
466#endif
467
468
469#define __ae2f_AnnMlpPredictStream_imp(...)
470 __ae2f_AnnMlpPredictPrimal_imp(-1, , __VA_ARGS__)
471
472#define __ae2f_AnnMlpPredictStream_C(reterr, mlp, inp, out)
473 __ae2f_AnnMlpPredictPrimal(-1, , reterr, mlp, inp, out)
474
475#define __ae2f_AnnMlpPredict_imp(...)
476 __ae2f_AnnMlpPredictPrimal_imp(&1 ? 0 : 1, &1, __VA_ARGS__)
477
478#define __ae2f_AnnMlpPredict_C(...)
479 __ae2f_AnnMlpPredictPrimal(&1 ? 0 : 1, &1, __VA_ARGS__)
480
481ae2f_MAC() _ae2f_AnnMlpHidDeltaSingle_imp(
482 ae2f_AnnMlpHidDeltaSingle_t v_single
483 , const ae2f_AnnSlp_t slp
484 , const ae2f_float_t* const weight
485 , const ae2f_float_t* const delta
486 , const size_t iidx
487 )
488{
489 (v_single).m_ret = 0;
490
491 for((v_single).m_i = (slp).m_outc; (v_single).m_i--; )
492 {
493 (v_single).m_ret +=
494 ((weight) + (slp).m_inc * (v_single).m_i)[iidx] * (delta)[(v_single).m_i];
495 }
496}
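/* Note added for this listing (not part of the original Mlp.def.cc): the loop
 * above accumulates the back-propagated delta for one input unit iidx of a
 * layer,
 *
 *   m_ret = sum over i in [0, slp.m_outc) of
 *           weight[slp.m_inc * i + iidx] * delta[i]
 *
 * i.e. one column of the layer's weight matrix dotted with the next layer's
 * delta vector.
 */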
497
498 /** @brief delta to delta: scale a summed delta by the activation derivative */
499ae2f_MAC() _ae2f_AnnMlpPropagate_imp(
500 ae2f_float_t v_tmp,
501 size_t v_send
502 , const ae2f_AnnSlp_t slp_then
503 , ae2f_float_t* const retdelta_then
504
505 , const ae2f_float_t* const deltaseed
506 , ae2f_AnnAct_t actderiv_then
507
508 , const ae2f_float_t* const inp
509 )
510{
511 for((v_send) = (slp_then).m_outc; (v_send)--;) {
512 actderiv_then(&(v_tmp), (inp)[v_send]);
513 (retdelta_then)[v_send] = (v_tmp) * (deltaseed)[v_send];
514 }
515}
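/* Note added for this listing (not part of the original Mlp.def.cc): for every
 * unit j of the "then" layer the loop above computes
 *
 *   retdelta_then[j] = actderiv_then(inp[j]) * deltaseed[j]
 *
 * where actderiv_then returns its value through its first argument, so the
 * summed delta (deltaseed) is scaled by the activation derivative evaluated at
 * the value that layer saw on the forward pass (see the call sites below).
 */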
516
517#define __ae2f_AnnMlpFollowStream_imp(...)
518 __ae2f_AnnMlpFollowPrimal_imp(-1, , __VA_ARGS__)
519
520
521#define __ae2f_AnnMlpFollow_imp(...)
522 __ae2f_AnnMlpFollowPrimal_imp(&1 ? 0 : 1,&1, __VA_ARGS__)
523
524ae2f_MAC(OPER_NEG, OPER_NONE,) _ae2f_AnnMlpFollowPrimal_imp(
525 ae2f_AnnMlpFollow_t v_follow
526 , const ae2f_AnnMlp_t mlp
527
528 , const ae2f_float_t* const inp
529 , const ae2f_float_t* const delta
530 , const size_t* const lenv
531
532 , const ae2f_float_t* const outstream
533
534 , ae2f_float_t* const deltacache
535 , ae2f_float_t* const weight
536 , ae2f_float_t* const bias
537
538 , const ae2f_float_t learningrate
539 , const ae2f_float_t learningrate_bias
540
541 , ae2f_AnnAct_t* const * const actderiv
542 )
543{
544 assert(((mlp).m_depth > 2) && "m_depth must be more than 2.");
545 assert((inp) && "inp is null");
546 assert((weight) && "weight is null");
547 assert((bias) && "bias is null");
548 assert((actderiv) && "actderiv list is null");
549 assert((delta) && "delta is null");
550 assert(lenv);
551
552 /** m_k: index for then */
553 (v_follow).m_k = (mlp).m_depth - 2;
554
555 /** permanent: per-layer page strides, fixed for the whole pass */
556 (v_follow).m_pg_weight = (v_follow).m_pg_out = (mlp).m_outc;
557 (v_follow).m_pg_weight *= (v_follow).m_pg_out;
558
559 (v_follow).m_outc = (lenv)[(v_follow).m_k + 1];
560 (v_follow).m_inc = (lenv)[(v_follow).m_k];
561
562 /**
563 * out field update (delta is specified)
564 * */
565 __ae2f_AnnSlpFollow_imp(
566 v_follow
567 , (v_follow)
568 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
569 , (delta)
570 , ((weight) + ((v_follow).m_pg_weight * ((v_follow).m_k)))
571 , ((bias) + ((v_follow).m_pg_out * ((v_follow).m_k)))
572 , learningrate
573 , learningrate_bias
574 );
575
576 (v_follow).m_j = (v_follow).m_inc;
577 while((v_follow).m_j--) {
578 __ae2f_AnnMlpHidDeltaSingle_imp(
579 v_follow
580 , v_follow
581 , ((weight) + ((v_follow).m_pg_weight * (v_follow).m_k))
582 , (delta)
583 , (v_follow).m_j
584 );
585
586 ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NEG))[(v_follow).m_j]
587 = (v_follow).m_ret;
588 }
589
590 (v_follow).m_outc = (lenv)[(v_follow).m_k];
591 (v_follow).m_inc = (lenv)[(v_follow).m_k - 1];
592
593 /** nxt-delta to then-delta */
594 if((actderiv)[(v_follow).m_k - 1]) {
595 __ae2f_AnnMlpPropagate_imp(
596 (v_follow).m_ret
597 , (v_follow).m_stack.m_send
598 , (v_follow)
599 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
600 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
601 , (actderiv)[(v_follow).m_k - 1]
602 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1)) /** input of (v_follow).m_k */
603 );
604 } else {
605 __ae2f_AnnMlpPropagate_imp(
606 (v_follow).m_ret
607 , (v_follow).m_stack.m_send
608 , (v_follow)
609 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
610 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
611 , /** actderiv */
612 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
613 );
614 }
615
616
617 while(--(v_follow).m_k) {
618 assert((v_follow).m_k);
619 assert(((v_follow).m_k OPER_NEG) != ((v_follow).m_k OPER_NONE));
620 assert(((v_follow).m_k OPER_NEG) == (((v_follow).m_k - 1) OPER_NONE));
621 assert((((v_follow).m_k + 1) OPER_NEG) == (((v_follow).m_k) OPER_NONE));
622
623 /**
624 * out field update (delta is generated)
625 * */
626 __ae2f_AnnSlpFollow_imp(
627 (v_follow)
628 , (v_follow)
629 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
630 , ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NONE))
631 , ((weight) + ((v_follow).m_pg_weight * (v_follow).m_k))
632 , ((bias) + ((v_follow).m_pg_out * ((v_follow).m_k)))
633 , learningrate
634 , learningrate_bias
635 );
636
637 (v_follow).m_j = (v_follow).m_inc;
638 assert((v_follow).m_inc == (lenv)[(v_follow).m_k]);
639
640 while((v_follow).m_j--) {
641 (v_follow).m_ret = 0;
642
643 __ae2f_AnnMlpHidDeltaSingle_imp(
644 v_follow
645 , v_follow
646 , ((weight) + (v_follow).m_pg_weight * ((v_follow).m_k))
647 , ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NONE))
648 , (v_follow).m_j
649 );
650
651 ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NEG))[(v_follow).m_j]
652 = (v_follow).m_ret;
653 }
654
655 (v_follow).m_outc = (v_follow).m_inc;
656 (v_follow).m_inc = (lenv)[(v_follow).m_k - 1];
657 assert((v_follow).m_outc == (lenv)[(v_follow).m_k]);
658
659 /** nxt-delta to then-delta */
660 if((actderiv)[(v_follow).m_k - 1]) {
661 __ae2f_AnnMlpPropagate_imp(
662 (v_follow).m_ret
663 , (v_follow).m_stack.m_send
664 , (v_follow)
665 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
666 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
667 , (actderiv)[(v_follow).m_k - 1]
668 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
669 );
670 } else {
671 __ae2f_AnnMlpPropagate_imp(
672 (v_follow).m_ret
673 , (v_follow).m_stack.m_send
674 , (v_follow)
675 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
676 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
677 , /** actderiv */
678 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
679 );
680 }
681 }
682
683 assert(((v_follow).m_k) == 0 && "(v_follow).m_k must be zero.");
684 assert((v_follow).m_inc == (lenv)[0] && "inc must be same as first element of lenv.");
685 assert((v_follow).m_outc == (lenv)[1] && "outc must be same as second element of lenv.");
686
687 __ae2f_AnnSlpFollow_imp(
688 v_follow
689 , (v_follow)
690 , (inp)
691 , ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NONE))
692 , ((weight))
693 , (bias)
694 , learningrate
695 , learningrate_bias
696 );
697}
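/* Note added for this listing (not part of the original Mlp.def.cc): the
 * backward pass above, in outline. m_k starts at depth - 2 (the last weight
 * page) and walks down to 0; for each page it does:
 *
 *   1. __ae2f_AnnSlpFollow_imp         - update weight/bias page m_k from the
 *                                        current delta and that layer's input;
 *   2. __ae2f_AnnMlpHidDeltaSingle_imp - for every input unit j of page m_k,
 *                                        dot weight column j with the delta to
 *                                        get the previous layer's raw delta;
 *   3. __ae2f_AnnMlpPropagate_imp      - scale that raw delta by the previous
 *                                        layer's activation derivative.
 *
 * The first iteration uses the caller-supplied delta; later ones read the
 * deltacache row selected by OPER_NEG / OPER_NONE, exactly as in the predict
 * pass; the final step applies __ae2f_AnnSlpFollow_imp to the network input.
 */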
698
700ae2f_MAC(OPER_NEG, OPER_NONE,) _ae2f_AnnMlpFollowPrimal(
701 ae2f_err_t* const reterr
702 , const ae2f_AnnMlp* mlp
703
704 , const ae2f_float_t* const inp
705 , const ae2f_float_t* const delta
706 )
707{
708 if((reterr) && *(reterr)) {}
709 else unless((mlp) && (inp) && (delta)) {
710 assert(0 && "nullref");
711 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
712 } else {
713 ae2f_AnnMlpFollow_t v_follow;
714
716 __ae2f_AnnMlpFollowPrimal_imp(OPER_NEG, OPER_NONE
717 , v_follow
718 , *(mlp), inp, delta, (mlp)->m_sz
719 , (mlp)->m_outcache, (mlp)->m_deltastream
720 , (mlp)->m_weight
721 , (mlp)->m_bias
722 , (mlp)->m_learningrate, (mlp)->m_learningrate_bias
723 , (mlp)->m_actderiv
724 );
725 }
726}
727
728#else
729
730#define __ae2f_AnnMlpFollowPrimal(...)
731 typedef char NO_ae2f_NEED_CLASS[-1]
732
733#endif
734
735#define __ae2f_AnnMlpFollow_C(...)
736 __ae2f_AnnMlpFollowPrimal(&1 ? 0 : 1,&1, __VA_ARGS__)
737
738#define __ae2f_AnnMlpFollowStream_C(...)
739 __ae2f_AnnMlpFollowPrimal(-1, , __VA_ARGS__)
740
741ae2f_MAC(OPER_NEG, OPER_NONE, ) _ae2f_AnnMlpTrainPrimal_imp(
742 ae2f_AnnMlpTrain_t v_train
743 , const ae2f_AnnMlp_t mlp
744
745 , const ae2f_float_t* const inp
746 , ae2f_float_t* const out
747 , const ae2f_float_t* const out_desired
748 , const size_t* const lenv
749
750 , ae2f_float_t* const outstream
751
752 , ae2f_float_t* const deltacache
753 , ae2f_float_t* const weight
754 , ae2f_float_t* const bias
755
756 , const ae2f_float_t learningrate
757 , const ae2f_float_t learningrate_bias
758
759 , ae2f_opt ae2f_AnnAct_t* const * const act
760 , ae2f_opt ae2f_AnnAct_t* const * const actderiv
761 , ae2f_AnnLoss_t* lossderiv
762)
763{
764 assert((lenv) && "length vector is null");
765 assert(((mlp).m_depth > 2) && "I see no hidden layer");
766 assert(lossderiv);
767 assert((actderiv) && "actderiv list is null");
768
769 (v_train).m_inc = (lenv)[(mlp).m_depth - 3];
770 (v_train).m_outc = (lenv)[(mlp).m_depth - 2];
771
772 __ae2f_AnnMlpPredictPrimal_imp(OPER_NEG, OPER_NONE,
773 (v_train), mlp, inp, out, lenv,
774 weight, bias, outstream, act
775 );
776
777 if((actderiv)[(mlp).m_depth - 2]) {
778 __ae2f_AnnSlpFetchDelta_imp(
779 (v_train), (v_train), (out), (out_desired)
780 , (actderiv)[(mlp).m_depth - 2]
781 , (lossderiv)
782 , (&((deltacache)[(mlp).m_outc * (((mlp).m_depth - 2) OPER_NONE)]))
783 );
784 } else {
785 __ae2f_AnnSlpFetchDelta_imp(
786 (v_train), (v_train), (out), (out_desired)
787 , /** actderiv */
788 , (lossderiv)
789 , (&((deltacache)[(mlp).m_outc * (((mlp).m_depth - 2) OPER_NONE)]))
790 );
791
792 }
793
795 __ae2f_AnnMlpFollowPrimal_imp(OPER_NEG, OPER_NONE
796 , v_train, mlp, inp
797 , (&((deltacache)[(mlp).m_outc * (((mlp).m_depth - 2) OPER_NONE)]))
798 , lenv, outstream
799 , deltacache, weight, bias
800 , learningrate, learningrate_bias
801 , actderiv
802 );
803}
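/* Note added for this listing (not part of the original Mlp.def.cc): training
 * one sample above is three steps: (1) __ae2f_AnnMlpPredictPrimal_imp runs the
 * forward pass into outstream and out; (2) __ae2f_AnnSlpFetchDelta_imp turns
 * (out, out_desired) into the output layer's delta via lossderiv (and the last
 * layer's actderiv, when present), written into the last deltacache row;
 * (3) __ae2f_AnnMlpFollowPrimal_imp back-propagates that delta, updating every
 * weight/bias page with learningrate / learningrate_bias.
 */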
804
806ae2f_MAC(OPER_NEG, OPER_NONE, ) _ae2f_AnnMlpTrainPrimal(
807 ae2f_err_t* const ae2f_opt reterr
808 , ae2f_AnnMlp* const mlp
809 , const ae2f_float_t* const inp
810 , ae2f_float_t* const out
811 , const ae2f_float_t* const out_desired
812 )
813{
814 if((reterr) && *(reterr));
815 else unless((mlp) && (out) && (out_desired) && (inp)) {
816 assert(0 && "nullref");
817 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
818 } else {
819 ae2f_AnnMlpTrain_t v_train;
821 __ae2f_AnnMlpTrainPrimal_imp(OPER_NEG, OPER_NONE
822 , v_train, *(mlp), inp
823 , out, out_desired
824 , (mlp)->m_sz, (mlp)->m_outcache
825 , (mlp)->m_deltastream
826 , (mlp)->m_weight, (mlp)->m_bias
827 , (mlp)->m_learningrate, (mlp)->m_learningrate_bias
828 , (mlp)->m_act, (mlp)->m_actderiv, (mlp)->m_lossderiv
829 );
830 }
831}
832
833ae2f_MAC(OPER_NEG, OPER_NONE, ) _ae2f_AnnMlpTrainAutoPrimal(
834 ae2f_err_t* const ae2f_opt reterr
835 , ae2f_AnnMlp* const mlp
836 , const ae2f_float_t* const inp
837 , const ae2f_float_t* const out_desired
838 )
839{
840 if((reterr) && *(reterr));
841 else unless((mlp) && (out_desired) && (inp)) {
842 assert(0 && "nullref");
843 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
844 } else {
845 ae2f_AnnMlpTrain_t v_train;
847 __ae2f_AnnMlpTrainPrimal_imp(OPER_NEG, OPER_NONE
848 , v_train, *(mlp), inp
849 , &(mlp)->m_outcache[((mlp)->m_outc) * ((mlp)->m_depth - 2)]
850 , out_desired
851 , (mlp)->m_sz, (mlp)->m_outcache
852 , (mlp)->m_deltastream
853 , (mlp)->m_weight, (mlp)->m_bias
854 , (mlp)->m_learningrate, (mlp)->m_learningrate_bias
855 , (mlp)->m_act, (mlp)->m_actderiv, (mlp)->m_lossderiv
856 );
857 }
858}
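/* Note added for this listing (not part of the original Mlp.def.cc): TrainAuto
 * differs from Train only in where the prediction is written: instead of a
 * caller buffer it reuses the last row of the model's own outcache,
 *
 *   out = &mlp->m_outcache[mlp->m_outc * (mlp->m_depth - 2)];
 *
 * so the caller supplies only the input and the desired output.
 */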
859
860#else
861#define __ae2f_AnnMlpTrainPrimal(...)
862 typedef char NO_ae2f_NEED_CLASS[-1]
863
864#define __ae2f_AnnMlpTrainAutoPrimal(...)
865 typedef char NO_ae2f_NEED_CLASS[-1]
866
867#endif
868
869
870#define __ae2f_AnnMlpTrain_C(reterr, mlp, inp, out, out_desired)
871 __ae2f_AnnMlpTrainPrimal(&1 ? 0 : 1, &1, reterr, mlp, inp, out, out_desired)
872
873#define __ae2f_AnnMlpTrainStream_C(reterr, mlp, inp, out, out_desired)
874 __ae2f_AnnMlpTrainPrimal(&1 ? 0 : 1, ae2f_NONE, reterr, mlp, inp, out, out_desired)
875
876#define __ae2f_AnnMlpTrain_imp(...)
877 __ae2f_AnnMlpTrainPrimal_imp(&1 ? 0 : 1, &1, __VA_ARGS__)
878
879#define __ae2f_AnnMlpTrainStream_imp(...)
880 __ae2f_AnnMlpTrainPrimal_imp(-1, , __VA_ARGS__)
881
882
883/** @see __ae2f_AnnMlpTrainAutoPrimal */
884#define __ae2f_AnnMlpTrainAuto_C(reterr, mlp, inp, out_desired)
885 __ae2f_AnnMlpTrainAutoPrimal(&1 ? 0 : 1, &1, reterr, mlp, inp, out_desired)
886
887/** @see __ae2f_AnnMlpTrainAutoPrimal */
888#define __ae2f_AnnMlpTrainAutoStream_C(reterr, mlp, inp, out_desired)
889 __ae2f_AnnMlpTrainAutoPrimal(-1, ae2f_NONE, reterr, mlp, inp, out_desired)
890
891#endif
void ae2f_AnnAct_t(ae2f_float_t *ret, ae2f_float_t x)
Customisable activation function type.
Definition Act.h:19
void ae2f_AnnLoss_t(ae2f_float_t *ret, const ae2f_float_t *out, const ae2f_float_t *goal, size_t index, size_t count)
Specify the way of calculating loss.
Definition Act.h:29
#define ae2f_reinterpret_cast(t, v)
Definition Cast.h:52
#define unless(...)
Invoked when the condition is false.
Definition Cast.h:103
#define ae2f_NONE
Literally nothing.
Definition Cxx.h:13
ae2f_float ae2f_float_t
Definition Float.h:38
#define ae2f_opt
Definition Guide.h:26
#define ae2f_MAC_BUILD
Definition Util.h:4
#define ae2f_NEED_CLASS
Definition Mlp.cl.c:1
const ae2f_AnnMlp_t const ae2f_float_t *const inp
Definition Mlp.def.cc:318
size_t const ae2f_AnnSlp_t ae2f_float_t *const const ae2f_float_t *const ae2f_AnnAct_t actderiv_then
Definition Mlp.def.cc:508
const ae2f_AnnMlp_t mlp
Definition Mlp.def.cc:317
const ae2f_AnnMlp_t const ae2f_float_t *const ae2f_float_t *const const size_t *const const ae2f_float_t *const const ae2f_float_t *const bias
Definition Mlp.def.cc:323
const ae2f_AnnMlp_t const ae2f_float_t *const ae2f_float_t *const const size_t *const sz
Definition Mlp.def.cc:321
const ae2f_AnnSlp_t slp
Definition Mlp.def.cc:484
const ae2f_AnnMlp_t const ae2f_float_t *const ae2f_float_t *const const size_t *const const ae2f_float_t *const const ae2f_float_t *const ae2f_float_t *const outcache
Definition Mlp.def.cc:325
const ae2f_AnnSlp_t const ae2f_float_t *const const ae2f_float_t *const delta
Definition Mlp.def.cc:486
const ae2f_AnnMlp_t const ae2f_float_t *const ae2f_float_t *const const size_t *const const ae2f_float_t *const weight
Definition Mlp.def.cc:322
const ae2f_AnnSlp_t const ae2f_float_t *const const ae2f_float_t *const const size_t iidx
Definition Mlp.def.cc:488
const ae2f_AnnMlp_t const ae2f_float_t *const ae2f_float_t *const out
Definition Mlp.def.cc:320
size_t const ae2f_AnnSlp_t slp_then
Definition Mlp.def.cc:503
size_t const ae2f_AnnSlp_t ae2f_float_t *const const ae2f_float_t *const deltaseed
Definition Mlp.def.cc:506
#define OPER_NONE
Definition Mlp.def.cc:21
size_t v_send
Definition Mlp.def.cc:502
size_t const ae2f_AnnSlp_t ae2f_float_t *const retdelta_then
Definition Mlp.def.cc:505
const ae2f_AnnMlp_t const ae2f_float_t *const ae2f_float_t *const const size_t *const const ae2f_float_t *const const ae2f_float_t *const ae2f_float_t *const ae2f_AnnAct_t *const *const act_opt
Definition Mlp.def.cc:327
#define OPER_NEG
Definition Mlp.def.cc:20
#define ae2f_errGlob_ALLOC_FAILED
stdlib allocating functions (malloc, calloc, realloc) have failed.
Definition errGlob.h:40
uint8_t ae2f_err_t
Informs that this number represents the error.
Definition errGlob.h:19
#define ae2f_errGlob_PTR_IS_NULL
Failed to refer to the pointer or l-value inside the function.
Definition errGlob.h:32
#define __ae2f_AnnMlpFollowPrimal(...)
Definition Mlp.auto.h:756
#define __ae2f_AnnMlpHidDeltaSingle_imp(v_single, slp, weight, delta, iidx)
Definition Mlp.auto.h:503
#define __ae2f_MACRO_GENERATED
Definition Mlp.auto.h:2
#define __ae2f_AnnMlpTrainAutoPrimal(...)
Definition Mlp.auto.h:897
#define __ae2f_AnnMlpPropagate_imp(v_tmp, v_send, slp_then, retdelta_then, deltaseed, actderiv_then, inp)
delta to delta
Definition Mlp.auto.h:525
#define __ae2f_AnnMlpPredictStream_imp(...)
Definition Mlp.auto.h:491
#define __ae2f_AnnMlpFollowPrimal_imp(OPER_NEG, OPER_NONE, v_follow, mlp, inp, delta, lenv, outstream, deltacache, weight, bias, learningrate, learningrate_bias, actderiv)
Definition Mlp.auto.h:552
#define __ae2f_AnnMlpPredictPrimal(...)
Definition Mlp.auto.h:485
#define __ae2f_AnnMlpPredictPrimal_imp(OPER_NEG, OPER_NONE, v_predict, mlp, inp, out, sz, weight, bias, outcache, act_opt)
layer must be more than 2
Definition Mlp.auto.h:330
#define __ae2f_AnnMlpInitWithOutSz_imp(...)
Definition Mlp.auto.h:321
#define __ae2f_AnnMlpTrainPrimal(...)
Definition Mlp.auto.h:894
#define __ae2f_AnnMlpSz_imp(...)
Definition Mlp.auto.h:318
#define __ae2f_AnnMlpTrainPrimal_imp(OPER_NEG, OPER_NONE, v_train, mlp, inp, out, out_desired, lenv, outstream, deltacache, weight, bias, learningrate, learningrate_bias, act, actderiv, lossderiv)
Definition Mlp.auto.h:767
#define __ae2f_AnnMlpMk_imp(...)
Definition Mlp.auto.h:315
#define __ae2f_AnnSlpFollow_imp(v_follow, _this, prm_in, delta, weight, bias, learningrate, learningrate_bias)
Definition Slp.auto.h:420
#define __ae2f_AnnSlpFetchDelta_imp(v_delta, slp, out, out_desired, actderiv_opt, lossderiv, retdelta)
Definition Slp.auto.h:538
#define __ae2f_AnnSlpPredict_imp(v_predict, _this, prm_in, out, weight, bias, act_opt)
Definition Slp.auto.h:268
#define ae2f_MAC()
delta to delta
Definition mac.h:4