Microcontrollers Using TensorFlow Lite


Below is an example in C++ that uses TensorFlow Lite for Microcontrollers to run a simple CNN model for image classification on an embedded system. (TensorFlow Lite for Microcontrollers is a C++ library, so the example is C++ even though it keeps a C-style structure.) In this snippet, all required libraries and headers are included. Keep in mind that you’ll need to supply the actual model data via the cnn_model_data.h header, which must define a byte array cnn_model_data containing your TensorFlow Lite model; a sketch of such a header is shown after the example.

/*
 * Example: CNN Image Classification on an Embedded Device using TFLite for Microcontrollers
 */

#include <stdio.h>
#include <stdint.h>

// TensorFlow Lite for Microcontrollers headers:
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"  // tflite::Model, tflite::GetModel
#include "tensorflow/lite/version.h"                  // TFLITE_SCHEMA_VERSION

// Include your CNN model data; this header should define the cnn_model_data array.
// Use the conversion script provided by TFLite to generate this file from your .tflite model.
#include "cnn_model_data.h"

// Define the tensor arena size. Adjust this value based on your model's requirements and available RAM.
#define TENSOR_ARENA_SIZE (16 * 1024)
// Keep the arena 16-byte aligned; tensor buffers may require it.
alignas(16) static uint8_t tensor_arena[TENSOR_ARENA_SIZE];

int main() {
    // Load the TFLite model from the provided data.
    const tflite::Model* model = tflite::GetModel(cnn_model_data);
    if (model->version() != TFLITE_SCHEMA_VERSION) {
        printf("Model schema version mismatch! Expected: %d, Got: %d\n",
               TFLITE_SCHEMA_VERSION, (int)model->version());
        return -1;
    }

    // Create an operator resolver and register the operations used in the CNN model.
    // Adjust the number of allowed ops (template parameter) according to how many you need.
    tflite::MicroMutableOpResolver<5> micro_op_resolver;
    micro_op_resolver.AddConv2D();
    micro_op_resolver.AddMaxPool2D();
    micro_op_resolver.AddFullyConnected();
    micro_op_resolver.AddSoftmax();
    // Add additional ops if your model needs them.

    // Instantiate the interpreter.
    tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, TENSOR_ARENA_SIZE);

    // Allocate memory for the model's tensors.
    if (interpreter.AllocateTensors() != kTfLiteOk) {
        printf("AllocateTensors() failed\n");
        return -1;
    }
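
    // (Optional) After allocation succeeds, arena_used_bytes() reports how
    // much of the arena the model actually consumed -- a handy way to
    // right-size TENSOR_ARENA_SIZE for your target.
    printf("Tensor arena used: %u bytes\n", (unsigned)interpreter.arena_used_bytes());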

    // Obtain pointers to the input and output tensors.
    TfLiteTensor* input_tensor = interpreter.input(0);
    TfLiteTensor* output_tensor = interpreter.output(0);

    // For demonstration, fill the input tensor with a constant mid-gray value.
    // In a real scenario, this would be replaced with actual image data.
    for (size_t i = 0; i < input_tensor->bytes; i++) {
        input_tensor->data.uint8[i] = 128; // Mid-gray pixel value.
    }
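
    // Note: this assumes a uint8-quantized model (the common case on
    // microcontrollers). For a float32 model you would write to
    // input_tensor->data.f instead, typically with values normalized to [0, 1].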

    // Run model inference.
    if (interpreter.Invoke() != kTfLiteOk) {
        printf("Model inference failed!\n");
        return -1;
    }

    // Report the predicted class: pick the output index with the highest score.
    // How you interpret the output tensor depends on the model design; for a
    // typical softmax classifier, each output byte is one class score.
    int best_index = 0;
    for (size_t i = 1; i < output_tensor->bytes; i++) {
        if (output_tensor->data.uint8[i] > output_tensor->data.uint8[best_index]) {
            best_index = (int)i;
        }
    }
    printf("CNN Classification Result: class %d (score %d)\n",
           best_index, output_tensor->data.uint8[best_index]);

    return 0;
}
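
For reference, the cnn_model_data.h header is usually produced by converting your trained model to a .tflite flatbuffer and then dumping it as a C array, for example with xxd -i. The sketch below shows the expected shape of that file; the symbol names are the ones the example above assumes (xxd derives its own names from the input filename, so you may need to rename them), and the byte contents come from your own model.

/*
 * cnn_model_data.h -- sketch of a generated model header.
 * Typically produced with:  xxd -i cnn_model.tflite > cnn_model_data.h
 * (then rename the generated symbols to match the names used in the example).
 */
#ifndef CNN_MODEL_DATA_H_
#define CNN_MODEL_DATA_H_

// Keep the model buffer aligned; the flatbuffer is read in place.
alignas(16) const unsigned char cnn_model_data[] = {
    0x00  /* placeholder -- replace with the raw bytes of your .tflite model */
};
const unsigned int cnn_model_data_len = sizeof(cnn_model_data);

#endif  // CNN_MODEL_DATA_H_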

Explanation