ae2f_docs
Mlp.auto.h
1#undef __ae2f_MACRO_GENERATED
2#define __ae2f_MACRO_GENERATED 1
3/** @file Mlp.auto.h */
4
6#include <assert.h>
7#undef __ae2f_MACRO_GENERATED
8#define __ae2f_MACRO_GENERATED 1
9#include <stdlib.h>
10#undef __ae2f_MACRO_GENERATED
11#define __ae2f_MACRO_GENERATED 1
12#endif
13
15#include <ae2f/Macro.h>
16#undef __ae2f_MACRO_GENERATED
17#define __ae2f_MACRO_GENERATED 1
18#include <ae2f/Ann/Slp.h>
19#undef __ae2f_MACRO_GENERATED
20#define __ae2f_MACRO_GENERATED 1
21#endif
22
23#ifndef ae2f_Ann_Mlp_c
24
26#include <ae2f/Ann/Mlp.h>
27#undef __ae2f_MACRO_GENERATED
28#define __ae2f_MACRO_GENERATED 1
29#endif
30
32#define OPER_NEG
33#define OPER_NONE
34#endif
35
36#define ae2f_Ann_Mlp_c
37
38#define __ae2f_AnnMlpDel_C(a) free(ae2f_reinterpret_cast(void*, a))
39
40#define __ae2f_AnnMlpMk_C(
41 /** tparam */
42
43
44 /** param */
45 /* ,ae2f_err_t* const */ reterr,
46 /* ae2f_AnnMlp** const */ retmk,
47 /* const size_t */ depth,
48 /* const size_t* const */ szvector,
49 /* ae2f_opt size_t* const */ szswap_opt,
50 /* ae2f_opt ae2f_AnnActFFN_t** const */ act,
51 /* ae2f_opt ae2f_AnnActFFN_t** const */ actderiv,
52 /* ae2f_AnnLossFFN_t* const */ lossderiv,
53 /* ae2f_opt ae2f_float_t* const */ deltastream,
54 /* ae2f_opt ae2f_float_t* const */ outcache,
55 /* ae2f_opt ae2f_float_t* const */ weight,
56 /* ae2f_opt ae2f_float_t* const */ bias,
57 /* ae2f_float_t const */ learningrate,
58 /* ae2f_float_t const */ learningrate_bias,
59 /* ae2f_opt const size_t */ offset,
60 /* ae2f_opt const size_t */ extra \
61){
62 if((reterr) && *(reterr)) {}
63 else unless((szvector) && (lossderiv) && (retmk))
64 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
65 else {
66 ae2f_AnnMlpMk_t v_mk;
67 __ae2f_AnnMlpMk_imp(
68 v_mk
69 , depth, szvector, szswap_opt
70 , act, actderiv, lossderiv
71 , deltastream, outcache, weight
72 , bias, learningrate, learningrate_bias
73 , offset, extra
74 );
75
76 assert(v_mk.m_mkbase && "Initialising has failed");
77 *(retmk) = v_mk.m_mkbase;
78 unless(v_mk.m_mkbase) {
79 (reterr) && (*(reterr) |= ae2f_errGlob_ALLOC_FAILED);
80 }
81 } \
82}
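
A minimal allocation/teardown sketch built from the two macros above. It assumes this header is reachable as <ae2f/Ann/Mlp.auto.h> and that a loss-derivative callback named g_lossderiv (a hypothetical name) with whatever signature ae2f_AnnLossFFN_t requires is supplied elsewhere; every optional buffer is passed as 0 so the allocator carves them out of the same block:

    #include <assert.h>
    #include <stdlib.h>
    #include <ae2f/Ann/Mlp.auto.h>

    /* Assumed to be defined elsewhere with the signature named by ae2f_AnnLossFFN_t. */
    extern ae2f_AnnLossFFN_t g_lossderiv;

    static ae2f_AnnMlp *make_net(ae2f_err_t *err)
    {
        static const size_t shape[] = { 2, 4, 1 };  /* input, hidden, output widths */
        ae2f_AnnMlp *mlp = 0;

        __ae2f_AnnMlpMk_C(
            err, &mlp,
            sizeof(shape) / sizeof(*shape), shape,
            0,              /* szswap_opt: let the allocator carve it from the block   */
            0, 0,           /* act / actderiv lists are optional                        */
            &g_lossderiv,
            0, 0, 0, 0,     /* deltastream / outcache / weight / bias: auto-allocated   */
            0.05, 0.05,     /* learning rates for weights and bias                      */
            0, 0            /* offset / extra                                           */
        );
        return mlp;         /* release later with __ae2f_AnnMlpDel_C(mlp)               */
    }

The sketch calls the _C form because its parameter list is documented right above; the ae2f_AnnMlpMk wrapper declared in Mlp.h presumably forwards the same arguments.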
83
84
85/**
86 * @def __ae2f_AnnMlpMk_imp(reg_mk, prm_depth, pprm_szvector, propptr_szswap_opt, lppfn_act_opt, lppfn_actderiv_opt, pfn_lossderiv, propptr_deltastream_opt, propptr_outcache_opt, propptr_weight_opt, propptr_bias_opt, prm_learningrate, prm_learningrate_bias, prm_offset_opt, prm_extra_opt)
87 *
88 * @brief
89 * Automatically allocates an ae2f_AnnMlp and stores its pointer at `(reg_mk).m_mkbase`.
90 *
91 * @details
92 * If a parameter is marked <prop>, its element (or value) will be managed by the MLP afterwards. \n
93 * > This means it must remain valid at least as long as the newly allocated object. \n
94 * > Unless a parameter is marked <prop> together with <init>, the memory of the given parameter will not be initialised.
95 *
96 * <reg> means it holds mutable state, and its memory does not need to be contiguous.
97 *
98 * <opt> means its value could be '0', '\0', 0, 0x0, NULL, or nullptr.
99 *
100 * <prm> means it may be a primitive value, such as a literal rather than a variable.
101 *
102 * @param[in, out] reg_mk <reg> \n
103 * Type: ae2f_reg ae2f_AnnMlpMk_t& \n
104 * Brief: A temporary buffer for this function.
105 *
106 * @param prm_depth <prm> \n
107 * Type: const size_t \n
108 * Brief: Depth of the machine to be allocated.
109 *
110 * @param[in] pprm_szvector <ptr> <const> \n
111 * Type: const size_t[prm_depth] \n
112 * Brief: A shape of the model.
113 *
114 * @param[out] propptr_szswap_opt <prop> <ptr> <opt> <init> \n
115 * Type: size_t[prm_depth]& \n
116 * Brief: Optional valid buffer for Mlp to store the value of pprm_szvector.
117 *
118 * @param lppfn_act_opt <prop> <ptr> <fn> <opt> \n
119 * Type: ae2f_AnnActFFN_t[prm_depth]& \n
120 * Brief: Optional valid buffer for activation function for each layer.
121 *
122 * @param lppfn_actderiv_opt <prop> <ptr> <fn> <opt> \n
123 * Type: ae2f_AnnActFFN_t[prm_depth]& \n
124 * Brief: Optional valid buffer for activation derivative for each layer.
125 *
126 * @param pfn_lossderiv <fn> <ptr> <prm> \n
127 * Type: ae2f_AnnLossFFN_t \n
128 * Brief: Derivative of loss function for mlp model.
129 *
130 * @param propptr_deltastream_opt <prop> <ptr> \n
131 * Type: ae2f_float_t[MAX(pprm_szvector) * ((prm_depth) - 1)]& \n
132 * Brief: Optional delta stream buffer.
133 *
134 * @param propptr_outcache_opt <prop> <ptr> \n
135 * Type: ae2f_float_t[MAX(pprm_szvector) * ((prm_depth) - 1)]& \n
136 * Brief: Optional output stream buffer.
137 *
138 * @param propptr_weight_opt <prop> <ptr> \n
139 * Type: ae2f_float_t[MAXWEIGHT(pprm_szvector) * ((prm_depth) - 1)] \n
140 * Brief: Optional weight buffer. \n
141 * Details: MAXWEIGHT is the largest product of the sizes of two neighbouring layers.
142 *
143 * @param propptr_bias_opt <prop> <ptr> \n
144 * Type: ae2f_float_t[MAX(pprm_szvector) * ((prm_depth) - 1)]& \n
145 * Brief: Optional bias buffer.
146 *
147 * @param prm_learningrate <prm> \n
148 * Type: const ae2f_float_t \n
149 * Brief: learning rate for weights.
150 *
151 * @param prm_learningrate_bias <prm> \n
152 * Type: const ae2f_float_t \n
153 * Brief: learning rate for bias.
154 *
155 * @param prm_offset_opt <prm> <opt> \n
156 * Type: const size_t \n
157 * Brief: Desired gap, in bytes, between the structure itself and the additional buffers.
158 *
159 * @param prm_extra_opt <prm> <opt> \n
160 * Type: const size_t \n
161 * Brief: Desired extra buffer size, in bytes.
162 * */
163#define __ae2f_AnnMlpMk_imp(
164 /** tparam */
165
166
167 /** param */
168 /* , ae2f_AnnMlpMk_t */ reg_mk,
169 /* const size_t */ prm_depth,
170 /* const size_t* const */ pprm_szvector,
171 /* size_t* const */ propptr_szswap_opt,
172 /* ae2f_opt ae2f_AnnActFFN_t** const */ lppfn_act_opt,
173 /* ae2f_opt ae2f_AnnActFFN_t** const */ lppfn_actderiv_opt,
174 /* ae2f_AnnLossFFN_t* const */ pfn_lossderiv,
175 /* ae2f_opt ae2f_float_t* const */ propptr_deltastream_opt,
176 /* ae2f_opt ae2f_float_t* const */ propptr_outcache_opt,
177 /* ae2f_opt ae2f_float_t* const */ propptr_weight_opt,
178 /* ae2f_opt ae2f_float_t* const */ propptr_bias_opt,
179 /* const ae2f_float_t */ prm_learningrate,
180 /* const ae2f_float_t */ prm_learningrate_bias,
181 /* ae2f_opt const size_t */ prm_offset_opt,
182 /* ae2f_opt const size_t */ prm_extra_opt \
183)\
184{
185 assert((pprm_szvector) && "Size vector is null");
186 assert((pfn_lossderiv) && "Loss deriv must be initialised");
187 assert((prm_depth) > 2 && "Depth must be greater than 2");
188
189 (reg_mk).m_outc = 1;
190 (reg_mk).m_weightc = 1;
191 for((reg_mk).m_i = (prm_depth); (reg_mk).m_i--; ) {
192 assert((pprm_szvector)[(reg_mk).m_i] && "Zero value is not permitted");
193 (reg_mk).m_outc < (pprm_szvector)[(reg_mk).m_i] && ((reg_mk).m_outc = (pprm_szvector)[(reg_mk).m_i]);
194 if((reg_mk).m_i == (prm_depth) - 1) continue;
195 (reg_mk).m_weightc =
196 (reg_mk).m_weightc < (pprm_szvector)[(reg_mk).m_i] * (pprm_szvector)[(reg_mk).m_i + 1] ?
197 (pprm_szvector)[(reg_mk).m_i] * (pprm_szvector)[(reg_mk).m_i + 1] :
198 (reg_mk).m_weightc
199 ;
200 }
201
202 __ae2f_AnnMlpSz_imp(
203 (reg_mk).m_i
204 , (reg_mk).m_outc
205 , (reg_mk).m_weightc
206 , prm_depth, propptr_szswap_opt
207 , lppfn_act_opt, lppfn_actderiv_opt
208 , propptr_deltastream_opt, propptr_outcache_opt
209 , propptr_weight_opt, propptr_bias_opt
210 );
211
212 (reg_mk).m_mkptr.m_void = calloc(1, (reg_mk).m_i + (prm_offset_opt) + (prm_extra_opt));
213 (reg_mk).m_mkbase = (reg_mk).m_mkptr.m_mlp;
214
215 if((reg_mk).m_mkptr.m_void) {
216 __ae2f_AnnMlpInitWithOutSz_imp(
217 *(reg_mk).m_mkptr.m_mlp
218 , (reg_mk).m_i
219 , prm_depth
220 , (reg_mk).m_outc
221 , (reg_mk).m_weightc
222 , pprm_szvector
223 , (propptr_szswap_opt) ? (propptr_szswap_opt) : ae2f_reinterpret_cast(
224 size_t*
225 , (reg_mk).m_mkptr.m_mlp + 1)
226 , lppfn_act_opt
227 , lppfn_actderiv_opt
228 , pfn_lossderiv
229 , propptr_deltastream_opt
230 , propptr_outcache_opt
231 , propptr_weight_opt
232 , propptr_bias_opt
233 , prm_learningrate
234 , prm_learningrate_bias
235 );
236
237 (reg_mk).m_mkptr.m_mlp += 1;
238 (reg_mk).m_mkptr.m_byte += (prm_offset_opt);
239 (reg_mk).m_mkptr.m_sz += (prm_depth);
240
241 unless(lppfn_act_opt) {
242 (reg_mk).m_mkbase->m_act = (reg_mk).m_mkptr.m_Act;
243 (reg_mk).m_mkptr.m_Act += (prm_depth) - 1;
244 }
245
246 unless(lppfn_actderiv_opt) {
247 (reg_mk).m_mkbase->m_actderiv = (reg_mk).m_mkptr.m_Act;
248 (reg_mk).m_mkptr.m_Act += (prm_depth) - 1;
249 }
250
251 unless(propptr_deltastream_opt) {
252 (reg_mk).m_mkbase->m_deltastream = (reg_mk).m_mkptr.m_f;
253 (reg_mk).m_mkptr.m_f += ((prm_depth) - 1) * (reg_mk).m_outc;
254 }
255
256 unless(propptr_outcache_opt) {
257 (reg_mk).m_mkbase->m_outcache = (reg_mk).m_mkptr.m_f;
258 (reg_mk).m_mkptr.m_f += ((prm_depth) - 1) * (reg_mk).m_outc;
259 }
260
261 unless(propptr_bias_opt) {
262 (reg_mk).m_mkbase->m_bias = (reg_mk).m_mkptr.m_f;
263 (reg_mk).m_mkptr.m_f += ((prm_depth) - 1) * (reg_mk).m_outc;
264 }
265
266 unless(propptr_weight_opt) {
267 (reg_mk).m_mkbase->m_weight = (reg_mk).m_mkptr.m_f;
268 }
269 } \
270}
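
The parameter documentation above speaks of MAX(pprm_szvector) and MAXWEIGHT(pprm_szvector); the loop at the top of __ae2f_AnnMlpMk_imp computes exactly these two quantities. A standalone sketch of the same computation, handy when sizing propptr_weight_opt and the other optional buffers yourself:

    #include <stddef.h>

    /* Widest layer (MAX) and largest product of two neighbouring layer sizes (MAXWEIGHT),
     * mirroring the loop at the top of __ae2f_AnnMlpMk_imp. */
    static void mlp_extents(const size_t *szvector, size_t depth,
                            size_t *max_out, size_t *max_weight)
    {
        size_t i;
        *max_out = 1;
        *max_weight = 1;
        for (i = 0; i < depth; ++i) {
            if (*max_out < szvector[i])
                *max_out = szvector[i];
            if (i + 1 < depth && *max_weight < szvector[i] * szvector[i + 1])
                *max_weight = szvector[i] * szvector[i + 1];
        }
    }

For the shape {2, 4, 1} this yields MAX = 4 and MAXWEIGHT = 8, so propptr_weight_opt would need MAXWEIGHT * (depth - 1) = 16 ae2f_float_t elements and each per-layer stream MAX * (depth - 1) = 8.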
271
272#define __ae2f_AnnMlpSz_imp(
273 /** tparam */
274
275
276 /** param */
277 /* , size_t */ ret_sz,
278 /* const size_t */ outc,
279 /* const size_t */ weightc,
280 /* const size_t */ depth,
281 /* const bool */ szswap,
282 /* const bool */ act,
283 /* const bool */ actderiv,
284 /* const bool */ deltastream,
285 /* const bool */ outcache,
286 /* const bool */ weight,
287 /* const bool */ bias \
288)\
289{
290 assert((depth) > 2);
291
292 (ret_sz) = sizeof(ae2f_AnnMlp) + (!(szswap)) * sizeof(size_t);
293 (ret_sz) += (sizeof(void*) * ((depth) - 1)) * (!(act) + !(actderiv));
294 (ret_sz) += sizeof(ae2f_float_t)
295 * ((depth)) * (outc)
296 * (/**/
297 !(deltastream) + !(outcache) + !(bias)
298 );
299 (ret_sz) += sizeof(ae2f_float_t)
300 * (!(weight) * (weightc) * (depth)); \
301}
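
A hedged sketch of querying the byte count that __ae2f_AnnMlpMk_imp will pass to calloc for a given shape, before offset and extra are added. It assumes the header is reachable as <ae2f/Ann/Mlp.auto.h>; the flag arguments are zero to mean "the caller supplies nothing, count every optional buffer in":

    #include <assert.h>
    #include <stdio.h>
    #include <ae2f/Ann/Mlp.auto.h>

    int main(void)
    {
        size_t sz;
        /* shape {2, 4, 1}: outc (widest layer) = 4, weightc (largest neighbour product) = 8, depth = 3 */
        __ae2f_AnnMlpSz_imp(sz, 4, 8, 3, 0, 0, 0, 0, 0, 0, 0);
        printf("%zu bytes on top of the requested offset/extra\n", sz);
        return 0;
    }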
302
303#define __ae2f_AnnMlpInitWithOutSz_imp(
304 /** tparam */
305
306
307 /** param */
308 /* , ae2f_AnnMlp */ v_mlp,
309 /* size_t */ v_init,
310 /* const size_t */ depth,
311 /* const size_t */ outsz,
312 /* const size_t */ weightsz,
313 /* const size_t* const */ szvector,
314 /* size_t* const */ szswap_opt,
315 /* ae2f_opt ae2f_AnnActFFN_t** const */ act,
316 /* ae2f_opt ae2f_AnnActFFN_t** const */ actderiv,
317 /* ae2f_AnnLossFFN_t* const */ lossderiv,
318 /* ae2f_opt ae2f_float_t* const */ deltastream,
319 /* ae2f_opt ae2f_float_t* const */ outcache,
320 /* ae2f_opt ae2f_float_t* const */ weight,
321 /* ae2f_opt ae2f_float_t* const */ bias,
322 /* ae2f_float_t const */ learningrate,
323 /* ae2f_float_t const */ learningrate_bias \
324)\
325{
326 assert((depth) >= 2 && "At least you need an input and an output layer");
327 (v_mlp).m_depth = (depth);
328 (v_mlp).m_outc = (outsz);
329 (v_mlp).m_weightc = (weightsz);
330
331 assert((lossderiv) && "loss deriv is null");
332 (v_mlp).m_lossderiv = lossderiv;
333
334 (v_mlp).m_sz = (szswap_opt);
335 (v_mlp).m_act = (act);
336 (v_mlp).m_actderiv = (actderiv);
337
338 (v_mlp).m_deltastream = deltastream;
339 (v_mlp).m_outcache = outcache;
340 (v_mlp).m_weight = weight;
341 (v_mlp).m_bias = bias;
342
343 (v_mlp).m_learningrate = learningrate;
344 (v_mlp).m_learningrate_bias = learningrate_bias;
345
346 if((szswap_opt) && (szswap_opt) != (szvector))
347 for((v_init) = (depth); (v_init)--; ) {
348 (szswap_opt)[(v_init)] = (szvector)[(v_init)];
349 } \
350}
351
352#define __ae2f_AnnMlpInit_imp(
353 /** tparam */
354
355
356 /** param */
357 /* , ae2f_AnnMlp */ v_mlp,
358 /* ae2f_AnnMlpInit_t */ v_init,
359 /* const size_t */ depth,
360 /* const size_t* const */ szvector,
361 /* size_t* const */ szswap_opt,
362 /* ae2f_opt ae2f_AnnActFFN_t** const */ act,
363 /* ae2f_opt ae2f_AnnActFFN_t** const */ actderiv,
364 /* ae2f_AnnLossFFN_t* const */ lossderiv,
365 /* ae2f_opt ae2f_float_t* const */ deltastream,
366 /* ae2f_opt ae2f_float_t* const */ outcache,
367 /* ae2f_opt ae2f_float_t* const */ weight,
368 /* ae2f_opt ae2f_float_t* const */ bias,
369 /* ae2f_float_t const */ learningrate,
370 /* ae2f_float_t const */ learningrate_bias \
371)\
372{
373 (v_init).m_outc = 0;
374 (v_init).m_weightc = 0;
375
376 assert((szvector) && "Size vector is null");
377 for((v_init).m_i = (depth); (v_init).m_i--; ) {
378 assert((szvector)[(v_init).m_i] && "Zero value is not permitted");
379 (v_init).m_outc < (szvector)[(v_init).m_i] && ((v_init).m_outc = (szvector)[(v_init).m_i]);
380 if((v_init).m_i == (depth) - 1) continue;
381
382 (v_init).m_weightc =
383 (v_init).m_weightc < (szvector)[(v_init).m_i] * (szvector)[(v_init).m_i + 1] ?
384 (szvector)[(v_init).m_i] * (szvector)[(v_init).m_i + 1] :
385 (v_init).m_weightc;
386 }
387
388 __ae2f_AnnMlpInitWithOutSz_imp(
389 v_mlp, (v_init).m_i, depth, (v_init).m_outc, (v_init).m_weightc
390 , szvector, szswap_opt, act, actderiv, lossderiv
391 , deltastream, outcache, weight, bias, learningrate, learningrate_bias
392 ); \
393}
394
395
396/** @brief The number of layers must be more than 2 */
397#define __ae2f_AnnMlpPredictPrimal_imp(
398 /** tparam */
399 OPER_NEG,OPER_NONE,
400
401 /** param */
402 /* , ae2f_AnnMlpPredict_t */ v_predict,
403 /* const ae2f_AnnMlp_t */ mlp,
404 /* const ae2f_float_t* const */ inp,
405 /* ae2f_float_t* const */ out,
406 /* const size_t* const */ sz,
407 /* const ae2f_float_t* const */ weight,
408 /* const ae2f_float_t* const */ bias,
409 /* ae2f_float_t* const */ outcache,
410 /* ae2f_AnnActFFN_t* const* const */ act_opt \
411)\
412{
413 assert((mlp).m_depth > 2);
414
415 (v_predict).m_depth = (mlp).m_depth - 1;
416 (v_predict).m_outc_max = (mlp).m_outc;
417
418 (v_predict).m_inc = (sz)[0];
419 (v_predict).m_outc = (sz)[1];
420
421 if((act_opt)[0]) {
422 __ae2f_AnnSlpPredict_imp(
423 (v_predict)
424 , (v_predict)
425 , inp
426 , ((outcache) + (0 OPER_NONE) * (v_predict).m_outc_max)
427 , ((outcache) + (0 OPER_NONE) * (v_predict).m_outc_max)
428 , weight
429 , bias
430 , (act_opt)[0]
431 );
432 } else {
433 __ae2f_AnnSlpPredict_imp(
434 (v_predict)
435 , (v_predict)
436 , inp
437 , ((outcache) + (0 OPER_NONE) * (v_predict).m_outc_max)
438 , ((outcache) + (0 OPER_NONE) * (v_predict).m_outc_max)
439 , weight
440 , bias
441 ,
442 );
443 }
444
445
446 for(
447 (v_predict).m_k = 1;
448 (v_predict).m_k < (v_predict).m_depth - 1;
449 (v_predict).m_k++
450 )
451 {
452 (v_predict).m_inc = (v_predict).m_outc;
453 (v_predict).m_outc = (sz)[(v_predict).m_k + 1];
454
455 if((act_opt)[(v_predict).m_k]) {
456 assert((v_predict).m_k);
457 assert(((v_predict).m_k OPER_NEG) != ((v_predict).m_k OPER_NONE));
458 assert(((v_predict).m_k OPER_NEG) == (((v_predict).m_k - 1) OPER_NONE));
459 assert((((v_predict).m_k + 1) OPER_NEG) == (((v_predict).m_k) OPER_NONE));
460
461 __ae2f_AnnSlpPredict_imp(
462 (v_predict)
463 , (v_predict)
464 , ((outcache) + (((v_predict).m_k OPER_NEG) * (v_predict).m_outc_max))
465 , ((outcache) + (((v_predict).m_k OPER_NONE) * (v_predict).m_outc_max))
466 , ((outcache) + (((v_predict).m_k OPER_NONE) * (v_predict).m_outc_max))
467 , (weight) + ((v_predict).m_k) * ((mlp).m_weightc)
468 , (bias) + ((v_predict).m_k) * (v_predict).m_outc_max
469 , (act_opt)[(v_predict).m_k]
470 );
471 } else {
472 __ae2f_AnnSlpPredict_imp(
473 (v_predict)
474 , (v_predict)
475 , ((outcache) + (
476 ((v_predict).m_k OPER_NEG)
477 * (v_predict).m_outc_max
478 ))
479 , ((outcache) + (
480 ((v_predict).m_k OPER_NONE)
481 * (v_predict).m_outc_max
482 ))
483 , ((outcache) + (
484 ((v_predict).m_k OPER_NONE)
485 * (v_predict).m_outc_max
486 ))
487 , (weight)
488 + (((v_predict).m_k) * (mlp).m_weightc)
489 , (bias) + ((v_predict).m_k) * (v_predict).m_outc_max
490 ,
491 );
492 }
493 }
494
495 (v_predict).m_inc = (v_predict).m_outc;
496 (v_predict).m_outc = (sz)[(v_predict).m_k + 1];
497 assert((v_predict).m_k == (mlp).m_depth - 2);
498
499 if((act_opt)[(v_predict).m_k]) {
500 __ae2f_AnnSlpPredict_imp(
501 (v_predict)
502 , (v_predict)
503 , ((outcache) + ((((v_predict).m_k OPER_NEG)) * (v_predict).m_outc_max))
504 , (out)
505 , (out)
506 , (weight) + ((v_predict).m_k) * (v_predict).m_outc_max * (v_predict).m_outc_max
507 , (bias) + ((v_predict).m_k) * (v_predict).m_outc_max
508 , (act_opt)[(v_predict).m_k]
509 );
510 } else {
511 __ae2f_AnnSlpPredict_imp(
512 (v_predict)
513 , (v_predict)
514 , ((outcache) + ((((v_predict).m_k OPER_NEG)) * (v_predict).m_outc_max))
515 , (out)
516 , (out)
517 , ((weight) + ((v_predict).m_k) * (mlp).m_weightc)
518 , ((bias) + ((v_predict).m_k) * (v_predict).m_outc_max)
519 ,
520 );
521 } \
522}
523
524#define __ae2f_AnnMlpPredictPrimal(
525 /** tparam */
526 OPER_NEG,OPER_NONE,
527
528 /** param */
529 /* ,ae2f_err_t* */ reterr,
530 /* const ae2f_AnnMlp* const */ mlp,
531 /* const ae2f_float_t* const */ inp,
532 /* ae2f_float_t* const */ out \
533) \
534{
535 if((reterr) && *(reterr))
536 ;
537 else unless((mlp) && (inp) && (out)) {
538 assert(0 && "Null");
539 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
540 } else {
541 ae2f_AnnMlpPredict_t v_predict;
542
543 __ae2f_AnnMlpPredictPrimal_imp(
544 OPER_NEG, OPER_NONE
545 , v_predict, *(mlp)
546 , inp, out
547 , (mlp)->m_sz, (mlp)->m_weight
548 , (mlp)->m_bias, (mlp)->m_outcache
549 , (mlp)->m_act
550 );
551 } \
552}
553
554#define __ae2f_AnnMlpPredictStream_imp(v_predict, mlp, inp, out, sz, weight, bias, outcache, act_opt)
555 __ae2f_AnnMlpPredictPrimal_imp(-1, , v_predict, mlp, inp, out, sz, weight, bias, outcache, act_opt)
556
557#define __ae2f_AnnMlpPredictStream_C(reterr, mlp, inp, out)
558 __ae2f_AnnMlpPredictPrimal(-1, , reterr, mlp, inp, out)
559
560#define __ae2f_AnnMlpPredict_imp(v_predict, mlp, inp, out, sz, weight, bias, outcache, act_opt)
561 __ae2f_AnnMlpPredictPrimal_imp(&1 ? 0 : 1, &1, v_predict, mlp, inp, out, sz, weight, bias, outcache, act_opt)
562
563#define __ae2f_AnnMlpPredict_C(reterr, mlp, inp, delta)
564 __ae2f_AnnMlpPredictPrimal(&1 ? 0 : 1, &1, reterr, mlp, inp, delta)
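
The two wrapper families above differ only in the tokens they hand to __ae2f_AnnMlpPredictPrimal_imp. The Stream forms pass -1 and nothing, so (m_k OPER_NEG) and (m_k OPER_NONE) address the previous and the current row of outcache as (k - 1) and (k): every layer keeps its own row, which the backward pass later reads. The plain forms pass &1 ? 0 : 1 and &1, so the same expressions become (k & 1 ? 0 : 1) and (k & 1): the cache ping-pongs between rows 0 and 1, which is enough for inference alone. A small sketch of the two schemes (the ROW_* names are illustrative, not part of the library):

    #include <stdio.h>

    #define ROW_STREAM_PREV(k)    ((k) - 1)          /* (k OPER_NEG)  with OPER_NEG = -1        */
    #define ROW_STREAM_CURR(k)    ((k))              /* (k OPER_NONE) with OPER_NONE left empty */
    #define ROW_PINGPONG_PREV(k)  ((k) & 1 ? 0 : 1)  /* OPER_NEG  = &1 ? 0 : 1                  */
    #define ROW_PINGPONG_CURR(k)  ((k) & 1)          /* OPER_NONE = &1                          */

    int main(void)
    {
        int k;
        for (k = 1; k < 5; ++k)
            printf("k=%d  stream %d->%d  ping-pong %d->%d\n", k,
                   ROW_STREAM_PREV(k), ROW_STREAM_CURR(k),
                   ROW_PINGPONG_PREV(k), ROW_PINGPONG_CURR(k));
        return 0;
    }

Both schemes satisfy the asserts in the primal macros: the "previous" row of layer k equals the "current" row of layer k - 1, and the two rows never collide within a layer.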
565
566#define __ae2f_AnnMlpHidDeltaSingle_imp(
567 /** tparam */
568
569
570 /** param */
571 /* , ae2f_AnnMlpHidDeltaSingle_t */ v_single,
572 /* const ae2f_AnnSlpREG_t */ slp,
573 /* const ae2f_float_t* const */ weight,
574 /* const ae2f_float_t* const */ delta,
575 /* const size_t */ iidx \
576)\
577{
578 (v_single).m_ret = 0;
579
580 for((v_single).m_i = (slp).m_outc; (v_single).m_i--; )
581 {
582 (v_single).m_ret +=
583 ((weight) + (slp).m_inc * (v_single).m_i)[iidx] * (delta)[(v_single).m_i];
584 } \
585}
586
587/** @brief delta to delta */
588#define __ae2f_AnnMlpBwd_imp(
589 /** tparam */
590
591
592 /** param */
593 /* , ae2f_float_t */ v_tmp,
594 /* size_t */ v_send,
595 /* const ae2f_AnnSlpREG_t */ slp_then,
596 /* ae2f_float_t* const */ retdelta_then,
597 /* const ae2f_float_t* const */ deltaseed,
598 /* ae2f_AnnActFFN_t */ actderiv_then,
599 /* const ae2f_float_t* const */ inp \
600)\
601{
602 for((v_send) = (slp_then).m_outc; (v_send)--;) {
603 actderiv_then(&(v_tmp), inp, v_send, (slp_then).m_outc);
604 (retdelta_then)[v_send] = (v_tmp) * (deltaseed)[v_send];
605 } \
606}
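
In conventional notation, __ae2f_AnnMlpHidDeltaSingle_imp accumulates, for one unit j of a layer, the weighted sum of the following layer's deltas (reading the weight as ((weight) + m_inc * i)[j]), and __ae2f_AnnMlpBwd_imp then scales such a seed by the activation derivative. Together they form the usual hidden-layer step of backpropagation; the exact calling convention of ae2f_AnnActFFN_t comes from Act.h and is only assumed here:

$$\delta^{(l)}_j \;=\; f'_l\bigl(x^{(l)}_j\bigr)\,\sum_{i=0}^{\mathrm{outc}-1} w^{(l+1)}_{ij}\,\delta^{(l+1)}_i$$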
607
608#define __ae2f_AnnMlpFollowStream_imp(v_follow, mlp, inp, delta, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, actderiv)
609 __ae2f_AnnMlpFollowPrimal_imp(-1,,v_follow, mlp, inp, delta, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, actderiv)
610
611
612#define __ae2f_AnnMlpFollow_imp(v_follow, mlp, inp, delta, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, actderiv)
613 __ae2f_AnnMlpFollowPrimal_imp(&1 ? 0 : 1,&1, v_follow, mlp, inp, delta, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, actderiv)
614
615#define __ae2f_AnnMlpFollowPrimal_imp(
616 /** tparam */
617 OPER_NEG,OPER_NONE,
618
619 /** param */
620 /* , ae2f_AnnMlpFollow_t */ v_follow,
621 /* const ae2f_AnnMlp_t */ mlp,
622 /* const ae2f_float_t* const */ inp,
623 /* const ae2f_float_t* const */ delta,
624 /* const size_t* const */ lenv,
625 /* const ae2f_float_t* const */ outstream,
626 /* ae2f_float_t* const */ deltacache,
627 /* ae2f_float_t* const */ weight,
628 /* ae2f_float_t* const */ bias,
629 /* const ae2f_float_t */ learningrate,
630 /* const ae2f_float_t */ learningrate_bias,
631 /* ae2f_AnnActFFN_t* const* const */ actderiv \
632)\
633{
634 assert(((mlp).m_depth > 2) && "m_depth must be more than 2.");
635 assert((inp) && "inp is null");
636 assert((weight) && "weight is null");
637 assert((bias) && "bias is null");
638 assert((actderiv) && "actderiv list is null");
639 assert((delta) && "delta is null");
640 assert(lenv);
641
642 /** m_k: index for then */
643 (v_follow).m_k = (mlp).m_depth - 2;
644
645 /** permanent */
646 (v_follow).m_pg_out = (mlp).m_outc;
647 (v_follow).m_pg_weight = (mlp).m_weightc;
648
649 (v_follow).m_outc = (lenv)[(v_follow).m_k + 1];
650 (v_follow).m_inc = (lenv)[(v_follow).m_k];
651
652 /** \
653 * out field update (delta is specified) \
654 * */
655 __ae2f_AnnSlpFollow_imp(
656 v_follow
657 , (v_follow)
658 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
659 , (delta)
660 , ((weight) + ((v_follow).m_pg_weight * ((v_follow).m_k)))
661 , ((bias) + ((v_follow).m_pg_out * ((v_follow).m_k)))
662 , learningrate
663 , learningrate_bias
664 );
665
666 (v_follow).m_j = (v_follow).m_inc;
667 while((v_follow).m_j--) {
668 __ae2f_AnnMlpHidDeltaSingle_imp(
669 v_follow
670 , v_follow
671 , ((weight) + ((v_follow).m_pg_weight * (v_follow).m_k))
672 , (delta)
673 , (v_follow).m_j
674 );
675
676 ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NEG))[(v_follow).m_j]
677 = (v_follow).m_ret;
678 }
679
680 (v_follow).m_outc = (lenv)[(v_follow).m_k];
681 (v_follow).m_inc = (lenv)[(v_follow).m_k - 1];
682
683 /** nxt-delta to then-delta */
684 if((actderiv)[(v_follow).m_k - 1]) {
685 __ae2f_AnnMlpBwd_imp(
686 (v_follow).m_ret
687 , (v_follow).m_stack.m_send
688 , (v_follow)
689 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
690 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
691 , (actderiv)[(v_follow).m_k - 1]
692 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1)) /** input of (v_follow).m_k */
693 );
694 } else {
695 __ae2f_AnnMlpBwd_imp(
696 (v_follow).m_ret
697 , (v_follow).m_stack.m_send
698 , (v_follow)
699 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
700 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
701 , /** actderiv */
702 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
703 );
704 }
705
706
707 while(--(v_follow).m_k) {
708 assert((v_follow).m_k);
709 assert(((v_follow).m_k OPER_NEG) != ((v_follow).m_k OPER_NONE));
710 assert(((v_follow).m_k OPER_NEG) == (((v_follow).m_k - 1) OPER_NONE));
711 assert((((v_follow).m_k + 1) OPER_NEG) == (((v_follow).m_k) OPER_NONE));
712
713 /** \
714 * out field update (delta is generated) \
715 * */
716 __ae2f_AnnSlpFollow_imp(
717 (v_follow)
718 , (v_follow)
719 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
720 , ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NONE))
721 , ((weight) + ((v_follow).m_pg_weight * (v_follow).m_k))
722 , ((bias) + ((v_follow).m_pg_out * ((v_follow).m_k)))
723 , learningrate
724 , learningrate_bias
725 );
726
727 (v_follow).m_j = (v_follow).m_inc;
728 assert((v_follow).m_inc == (lenv)[(v_follow).m_k]);
729
730 while((v_follow).m_j--) {
731 (v_follow).m_ret = 0;
732
733 __ae2f_AnnMlpHidDeltaSingle_imp(
734 v_follow
735 , v_follow
736 , ((weight) + (v_follow).m_pg_weight * ((v_follow).m_k))
737 , ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NONE))
738 , (v_follow).m_j
739 );
740
741 ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NEG))[(v_follow).m_j]
742 = (v_follow).m_ret;
743 }
744
745 (v_follow).m_outc = (v_follow).m_inc;
746 (v_follow).m_inc = (lenv)[(v_follow).m_k - 1];
747 assert((v_follow).m_outc == (lenv)[(v_follow).m_k]);
748
749 /** nxt-delta to then-delta */
750 if((actderiv)[(v_follow).m_k - 1]) {
751 __ae2f_AnnMlpBwd_imp(
752 (v_follow).m_ret
753 , (v_follow).m_stack.m_send
754 , (v_follow)
755 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
756 , ((deltacache) + ((v_follow).m_pg_out * ((v_follow).m_k OPER_NEG)))
757 , (actderiv)[(v_follow).m_k - 1]
758 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
759 );
760 } else {
761 __ae2f_AnnMlpBwd_imp(
762 (v_follow).m_ret
763 , (v_follow).m_stack.m_send
764 , (v_follow)
765 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
766 , ((deltacache) + (((v_follow).m_pg_out) * ((v_follow).m_k OPER_NEG)))
767 , /** actderiv */
768 , ((outstream) + (v_follow).m_pg_out * ((v_follow).m_k - 1))
769 );
770 }
771 }
772
773 assert(((v_follow).m_k) == 0 && "(v_follow).m_k must be zero.");
774 assert((v_follow).m_inc == (lenv)[0] && "inc must be same as first element of lenv.");
775 assert((v_follow).m_outc == (lenv)[1] && "outc must be same as second element of lenv.");
776
777 __ae2f_AnnSlpFollow_imp(
778 v_follow
779 , (v_follow)
780 , (inp)
781 , ((deltacache) + (v_follow).m_pg_out * ((v_follow).m_k OPER_NONE))
782 , ((weight))
783 , (bias)
784 , learningrate
785 , learningrate_bias
786 ); \
787}
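
The sweep above walks k from the last weight matrix down to the first: update the current layer with __ae2f_AnnSlpFollow_imp using the delta at hand, back-project a new delta with __ae2f_AnnMlpHidDeltaSingle_imp, scale it with __ae2f_AnnMlpBwd_imp, and repeat until the input layer is updated directly from inp. The per-layer update itself lives in Slp.auto.h and is not shown here; assuming it is the standard gradient step, each visited layer performs

$$w^{(l)}_{ji} \leftarrow w^{(l)}_{ji} - \eta\,\delta^{(l)}_j\,x^{(l)}_i, \qquad b^{(l)}_j \leftarrow b^{(l)}_j - \eta_b\,\delta^{(l)}_j$$

with \(\eta\) = m_learningrate and \(\eta_b\) = m_learningrate_bias.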
788
789#define __ae2f_AnnMlpFollowPrimal(
790 /** tparam */
791 OPER_NEG,OPER_NONE,
792
793 /** param */
794 /* ,ae2f_err_t* const */ reterr,
795 /* const ae2f_AnnMlp* */ mlp,
796 /* const ae2f_float_t* const */ inp,
797 /* const ae2f_float_t* const */ delta \
798) \
799{
800 if((reterr) && *(reterr)) {}
801 else unless((mlp) && (inp) && (delta)) {
802 assert(0 && "nullref");
803 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
804 } else {
805 ae2f_AnnMlpFollow_t v_follow;
806
807 __ae2f_AnnMlpFollowPrimal_imp(
808 OPER_NEG, OPER_NONE
809 , v_follow
810 , *(mlp), inp, delta, (mlp)->m_sz
811 , (mlp)->m_outcache, (mlp)->m_deltastream
812 , (mlp)->m_weight
813 , (mlp)->m_bias
814 , (mlp)->m_learningrate, (mlp)->m_learningrate_bias
815 , (mlp)->m_actderiv
816 );
817 } \
818}
819
820#define __ae2f_AnnMlpFollow_C(reterr, mlp, inp, delta)
821 __ae2f_AnnMlpFollowPrimal(&1 ? 0 : 1,&1, reterr, mlp, inp, delta)
823#define __ae2f_AnnMlpFollowStream_C(reterr, mlp, inp, delta)
824 __ae2f_AnnMlpFollowPrimal(-1, , reterr, mlp, inp, delta)
825
826#define __ae2f_AnnMlpTrainPrimal_imp(
827 /** tparam */
828 OPER_NEG,OPER_NONE,
829
830 /** param */
831 /* , ae2f_AnnMlpTrain_t */ v_train,
832 /* const ae2f_AnnMlp_t */ mlp,
833 /* const ae2f_float_t* const */ inp,
834 /* ae2f_float_t* const */ out,
835 /* const ae2f_float_t* const */ out_desired,
836 /* const size_t* const */ lenv,
837 /* ae2f_float_t* const */ outstream,
838 /* ae2f_float_t* const */ deltacache,
839 /* ae2f_float_t* const */ weight,
840 /* ae2f_float_t* const */ bias,
841 /* const ae2f_float_t */ learningrate,
842 /* const ae2f_float_t */ learningrate_bias,
843 /* ae2f_opt ae2f_AnnActFFN_t* const* const */ act,
844 /* ae2f_opt ae2f_AnnActFFN_t* const* const */ actderiv,
845 /* ae2f_AnnLossFFN_t* */ lossderiv \
846)\
847{
848 assert((lenv) && "length vector is nil");
849 assert(((mlp).m_depth > 2) && "I see no hidden layer");
850 assert(lossderiv);
851 assert((actderiv) && "actderiv list");
852
853 (v_train).m_inc = (lenv)[(mlp).m_depth - 3];
854 (v_train).m_outc = (lenv)[(mlp).m_depth - 2];
855
856 __ae2f_AnnMlpPredictStream_imp(
857 (v_train), mlp, inp, out, lenv,
858 weight, bias, outstream, act
859 );
860
861 if((actderiv)[(mlp).m_depth - 2]) {
862 __ae2f_AnnSlpFetchDelta_imp(
863 (v_train), (v_train), (out), (out_desired)
864 , (actderiv)[(mlp).m_depth - 2]
865 , (lossderiv)
866 , (&((deltacache)[(mlp).m_outc * (((mlp).m_depth - 2) OPER_NONE)]))
867 );
868 } else {
869 __ae2f_AnnSlpFetchDelta_imp(
870 (v_train), (v_train), (out), (out_desired)
871 , ae2f_AnnActDerivFFN_PASS
872 , (lossderiv)
873 , (&((deltacache)[(mlp).m_outc * (((mlp).m_depth - 2) OPER_NONE)]))
874 );
875 }
876
877 __ae2f_AnnMlpFollowPrimal_imp(
878 OPER_NEG, OPER_NONE
879 , v_train, mlp, inp
880 , (&((deltacache)[(mlp).m_outc * (((mlp).m_depth - 2) OPER_NONE)]))
881 , lenv, outstream
882 , deltacache, weight, bias
883 , learningrate, learningrate_bias
884 , actderiv
885 ); \
886}
887
888#define __ae2f_AnnMlpTrainPrimal(
889 /** tparam */
890 OPER_NEG,OPER_NONE,
891
892 /** param */
893 /* ,ae2f_err_t* const ae2f_opt */ reterr,
894 /* ae2f_AnnMlp* const */ mlp,
895 /* const ae2f_float_t* const */ inp,
896 /* ae2f_float_t* const */ out,
897 /* const ae2f_float_t* const */ out_desired \
898)\
899{
900 if((reterr) && *(reterr));
901 else unless((mlp) && (out) && (out_desired) && (inp)) {
902 assert(0 && "nullref");
903 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
904 } else {
905 ae2f_AnnMlpTrain_t v_train;
906 __ae2f_AnnMlpTrainPrimal_imp(
907 OPER_NEG, OPER_NONE
908 , v_train, *(mlp), inp
909 , out, out_desired
910 , (mlp)->m_sz, (mlp)->m_outcache
911 , (mlp)->m_deltastream
912 , (mlp)->m_weight, (mlp)->m_bias
913 , (mlp)->m_learningrate, (mlp)->m_learningrate_bias
914 , (mlp)->m_act, (mlp)->m_actderiv, (mlp)->m_lossderiv
915 );
916 } \
917}
918
919#define __ae2f_AnnMlpTrainAutoPrimal(
920 /** tparam */
921 OPER_NEG,OPER_NONE,
922
923 /** param */
924 /* ,ae2f_err_t* const ae2f_opt */ reterr,
925 /* ae2f_AnnMlp* const */ mlp,
926 /* const ae2f_float_t* const */ inp,
927 /* const ae2f_float_t* const */ out_desired \
928)\
929{
930 if((reterr) && *(reterr));
931 else unless((mlp) && (out_desired) && (inp)) {
932 assert(0 && "nullref");
933 (reterr) && (*(reterr) |= ae2f_errGlob_PTR_IS_NULL);
934 } else {
935 ae2f_AnnMlpTrain_t v_train;
936 __ae2f_AnnMlpTrainPrimal_imp(
937 OPER_NEG, OPER_NONE
938 , v_train, *(mlp), inp
939 , &(mlp)->m_outcache[((mlp)->m_outc) * ((mlp)->m_depth - 2)]
940 , out_desired
941 , (mlp)->m_sz, (mlp)->m_outcache
942 , (mlp)->m_deltastream
943 , (mlp)->m_weight, (mlp)->m_bias
944 , (mlp)->m_learningrate, (mlp)->m_learningrate_bias
945 , (mlp)->m_act, (mlp)->m_actderiv, (mlp)->m_lossderiv
946 );
947 } \
948}
949
950
951#define __ae2f_AnnMlpTrain_C(reterr, mlp, inp, out, out_desired)
952 __ae2f_AnnMlpTrainPrimal(&1 ? 0 : 1, &1, reterr, mlp, inp, out, out_desired)
953
954#define __ae2f_AnnMlpTrainStream_C(reterr, mlp, inp, out, out_desired)
955 __ae2f_AnnMlpTrainPrimal(&1 ? 0 : 1, ae2f_NONE, reterr, mlp, inp, out, out_desired)
956
957#define __ae2f_AnnMlpTrain_imp(v_train, mlp, inp, out, goal, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, act, actderiv, lossderiv)
958 __ae2f_AnnMlpTrainPrimal_imp(&1 ? 0 : 1, &1, v_train, mlp, inp, out, goal, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, act, actderiv, lossderiv)
959
960#define __ae2f_AnnMlpTrainStream_imp(v_train, mlp, inp, out, goal, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, act, actderiv, lossderiv)
961 __ae2f_AnnMlpTrainPrimal_imp(-1, ae2f_NONE, v_train, mlp, inp, out, goal, lenv, outstream, deltacache, weight, bias, lr_w, lr_b, act, actderiv, lossderiv)
962
963
964/** @see __ae2f_AnnMlpTrainAutoPrimal */
965#define __ae2f_AnnMlpTrainAuto_C(reterr, mlp, inp, out_desired)
966 __ae2f_AnnMlpTrainAutoPrimal(&1 ? 0 : 1, &1, reterr, mlp, inp, out_desired)
967
968/** @see __ae2f_AnnMlpTrainAutoPrimal */
969#define __ae2f_AnnMlpTrainAutoStream_C(reterr, mlp, inp, out_desired)
970 __ae2f_AnnMlpTrainAutoPrimal(-1, ae2f_NONE, reterr, mlp, inp, out_desired)
971
972#endif
973
974
975
976#undef __ae2f_MACRO_GENERATED
977
978#define __ae2f_MACRO_GENERATED 0
Definition mac.h:28