AIfES 2 2.0.0
aialgo_sequential_inference.h
#ifndef AIALGO_SEQUENTIAL_INFERENCE
#define AIALGO_SEQUENTIAL_INFERENCE

#include "core/aifes_core.h"
#include "core/aifes_math.h"
#include "basic/base/ailayer/ailayer_dense.h"
#include "basic/base/aimath/aimath_basic.h"

uint32_t aialgo_sizeof_inference_memory(aimodel_t *model);

uint32_t aialgo_sizeof_parameter_memory(aimodel_t *model);

uint8_t aialgo_schedule_inference_memory(aimodel_t *model, void *memory_ptr, uint32_t memory_size);

void aialgo_distribute_parameter_memory(aimodel_t *model, void *memory_ptr, uint32_t memory_size);

aitensor_t *aialgo_forward_model(aimodel_t *model, aitensor_t *input_data);

uint8_t aialgo_inference_model(aimodel_t *model, aitensor_t *input_data, aitensor_t *output_data);

uint8_t aialgo_compile_model(aimodel_t *model);

void aialgo_quantize_model_f32_to_q7(aimodel_t *model_f32, aimodel_t *model_q7, aitensor_t *representative_dataset);

void aialgo_set_model_result_precision_q31(aimodel_t *model, uint16_t shift);

void aialgo_set_model_delta_precision_q31(aimodel_t *model, uint16_t shift);

void aialgo_set_model_gradient_precision_q31(aimodel_t *model, uint16_t shift);

void aialgo_print_model_structure(aimodel_t *model);

void aialgo_set_layer_settings_model(aimodel_t *model, uint32_t bitmask, uint8_t shift, uint32_t value);

void aialgo_set_training_mode_model(aimodel_t *model, uint8_t value);

void aialgo_set_batch_mode_model(aimodel_t *model, uint8_t value);

void aialgo_set_trainable_model(aimodel_t *model, uint8_t value);

#endif // AIALGO_SEQUENTIAL_INFERENCE
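A minimal usage sketch of the inference workflow declared above: initialize the model structure, reserve the scratch memory for intermediate results, then run the forward pass. It assumes the model's layers are already connected and its weights already set (e.g. as constant arrays), that a return value of 0 means success, and that <aifes.h> is the umbrella header of AIfES for Arduino; the helper name run_model is illustrative, not part of the API.

#include <stdlib.h>
#include <aifes.h>  /* assumed umbrella header of AIfES for Arduino */

/* Hypothetical helper: run one inference on a model whose layers and weights
 * are already set up. input and output must match the model's input and
 * output shapes. */
uint8_t run_model(aimodel_t *model, aitensor_t *input, aitensor_t *output)
{
    /* Initialize the model structure (layer connections, result shapes). */
    if (aialgo_compile_model(model) != 0) {
        return 1;
    }

    /* Reserve a scratch buffer for the intermediate results of one inference. */
    uint32_t memory_size = aialgo_sizeof_inference_memory(model);
    void *inference_memory = malloc(memory_size);
    if (inference_memory == NULL) {
        return 1;
    }
    aialgo_schedule_inference_memory(model, inference_memory, memory_size);

    /* Forward pass: the result is written into 'output'. */
    uint8_t status = aialgo_inference_model(model, input, output);

    free(inference_memory);
    return status;
}

On targets without a heap, a statically allocated byte array that is at least as large as the reported memory_size can be passed to aialgo_schedule_inference_memory() instead of a malloc'd buffer.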
Brief documentation of the functions declared in this file:

void aialgo_quantize_model_f32_to_q7(aimodel_t *model_f32, aimodel_t *model_q7, aitensor_t *representative_dataset)
Quantize model parameters (weights and bias); see the usage sketch after this list.
void aialgo_set_model_result_precision_q31(aimodel_t *model, uint16_t shift)
Initialize the quantization parameters of the layer results for Q31 data type.
void aialgo_distribute_parameter_memory(aimodel_t *model, void *memory_ptr, uint32_t memory_size)
Assign the memory for the trainable parameters (like weights, bias, ...) of the model.
uint8_t aialgo_inference_model(aimodel_t *model, aitensor_t *input_data, aitensor_t *output_data)
Perform an inference on the model / Run the model.
void aialgo_set_training_mode_model(aimodel_t *model, uint8_t value)
Enables / disables the training mode of the model.
void aialgo_set_batch_mode_model(aimodel_t *model, uint8_t value)
Enables / disables the batch mode of the model.
void aialgo_print_model_structure(aimodel_t *model)
Print the layer structure of the model with the configured parameters.
uint8_t aialgo_compile_model(aimodel_t *model)
Initialize the model structure.
void aialgo_set_model_gradient_precision_q31(aimodel_t *model, uint16_t shift)
Initialize the quantization parameters of the gradients for Q31 data type.
void aialgo_set_trainable_model(aimodel_t *model, uint8_t value)
Freeze / Unfreeze trainable parameters of the model.
aitensor_t * aialgo_forward_model(aimodel_t *model, aitensor_t *input_data)
Perform a forward pass on the model.
void aialgo_set_model_delta_precision_q31(aimodel_t *model, uint16_t shift)
Initialize the quantization parameters of the layer deltas for Q31 data type.
uint8_t aialgo_schedule_inference_memory(aimodel_t *model, void *memory_ptr, uint32_t memory_size)
Assign the memory for intermediate results of an inference to the model.
uint32_t aialgo_sizeof_inference_memory(aimodel_t *model)
Calculate the memory requirements for intermediate results of an inference.
uint32_t aialgo_sizeof_parameter_memory(aimodel_t *model)
Calculate the memory requirements for the trainable parameters (like weights, bias, ...) of the model.
void aialgo_set_layer_settings_model(aimodel_t *model, uint32_t bitmask, uint8_t shift, uint32_t value)
Apply the specified setting to all layers in the model.
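The quantization functions listed above convert a trained F32 model into a Q7 model. A minimal sketch, assuming model_f32 and model_q7 describe the same layer structure (one built on F32 tensors, one on Q7 tensors), that the F32 model is fully set up for inference with trained weights, that the Q7 model has already been compiled, and that 0 means success; prepare_q7_model and the <aifes.h> umbrella include are assumptions, not part of this header.

#include <stdlib.h>
#include <aifes.h>  /* assumed umbrella header of AIfES for Arduino */

/* Hypothetical helper: give the Q7 model its own parameter buffer and fill it
 * with quantized parameters derived from the trained F32 model. */
uint8_t prepare_q7_model(aimodel_t *model_f32, aimodel_t *model_q7,
                         aitensor_t *representative_dataset)
{
    /* Reserve memory for the Q7 model's trainable parameters (weights, biases). */
    uint32_t parameter_size = aialgo_sizeof_parameter_memory(model_q7);
    void *parameter_memory = malloc(parameter_size);
    if (parameter_memory == NULL) {
        return 1;
    }
    aialgo_distribute_parameter_memory(model_q7, parameter_memory, parameter_size);

    /* Derive the quantization parameters from the representative dataset and
     * copy the quantized weights and biases from the F32 model into the Q7 model. */
    aialgo_quantize_model_f32_to_q7(model_f32, model_q7, representative_dataset);
    return 0;
}

Because the representative dataset determines the quantization parameters, it should contain inputs that cover the value range expected in deployment.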
Referenced files and types:

aifes_core.h
AIfES 2 core interface.
aifes_math.h
AIfES 2 math interface.
ailayer_dense.h
Default implementation of the Dense layer.
aimath_basic.h
Basic data-type independent math operations.
aimodel_t
AIfES artificial neural network model. Definition: aifes_core.h:181
aitensor_t
A tensor in AIfES. Definition: aifes_math.h:89
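For completeness, a sketch of how input and output tensors for aialgo_inference_model() are typically prepared. The aitensor_t field names (dtype, dim, shape, data) and the aif32 data-type constant follow the AIfES F32 examples and are assumptions here, not declared in this file; <aifes.h> is assumed to be the umbrella header of AIfES for Arduino.

#include <aifes.h>  /* assumed umbrella header of AIfES for Arduino */

void prepare_tensors(void)
{
    /* Input: 1 sample with 2 features. */
    uint16_t input_shape[] = {1, 2};
    float input_data[] = {0.0f, 1.0f};
    aitensor_t input;
    input.dtype = aif32;        /* assumed F32 data-type constant */
    input.dim   = 2;
    input.shape = input_shape;
    input.data  = input_data;

    /* Output: 1 sample with 1 value, filled by the inference. */
    uint16_t output_shape[] = {1, 1};
    float output_data[1];
    aitensor_t output;
    output.dtype = aif32;
    output.dim   = 2;
    output.shape = output_shape;
    output.data  = output_data;

    /* input and output can now be passed to aialgo_inference_model(). */
    (void)input;
    (void)output;
}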